diff --git a/ansiblelater/command/candidates.py b/ansiblelater/command/candidates.py
index e0407cf..1b65f44 100644
--- a/ansiblelater/command/candidates.py
+++ b/ansiblelater/command/candidates.py
@@ -73,14 +73,16 @@ class Candidate(object):
             version = utils.standards_latest(self.standards)
             if self.expected_version:
                 if isinstance(self, RoleFile):
-                    LOG.warn("%s %s is in a role that contains a meta/main.yml without a declared "
-                             "standards version. "
-                             "Using latest standards version %s" %
-                             (type(self).__name__, self.path, version))
+                    LOG.warning(
+                        "%s %s is in a role that contains a meta/main.yml without a declared "
+                        "standards version. "
+                        "Using latest standards version %s" %
+                        (type(self).__name__, self.path, version))
                 else:
-                    LOG.warn("%s %s does not present standards version. "
-                             "Using latest standards version %s" %
-                             (type(self).__name__, self.path, version))
+                    LOG.warning(
+                        "%s %s does not present standards version. "
+                        "Using latest standards version %s" %
+                        (type(self).__name__, self.path, version))
         else:
             LOG.info("%s %s declares standards version %s" %
                      (type(self).__name__, self.path, version))
@@ -125,13 +127,13 @@ class Candidate(object):
                 err_labels.update(err.to_dict())
             if not standard.version:
-                LOG.warn("{id}Best practice '{name}' not met:\n{path}:{error}".format(
+                LOG.warning("{id}Best practice '{name}' not met:\n{path}:{error}".format(
                     id=self._format_id(standard.id),
                     name=standard.name,
                     path=self.path,
                     error=err), extra=flag_extra(err_labels))
             elif LooseVersion(standard.version) > LooseVersion(self.version):
-                LOG.warn("{id}Future standard '{name}' not met:\n{path}:{error}".format(
+                LOG.warning("{id}Future standard '{name}' not met:\n{path}:{error}".format(
                     id=self._format_id(standard.id),
                     name=standard.name,
                     path=self.path,
diff --git a/ansiblelater/logger.py b/ansiblelater/logger.py
index 3fd24ea..e781a9d 100644
--- a/ansiblelater/logger.py
+++ b/ansiblelater/logger.py
@@ -128,10 +128,10 @@ def _get_warn_handler(json=False):


 def _get_info_handler(json=False):
-    handler = logging.StreamHandler(sys.stderr)
+    handler = logging.StreamHandler(sys.stdout)
     handler.setLevel(logging.INFO)
     handler.addFilter(LogFilter(logging.INFO))
-    handler.setFormatter(MultilineFormatter(info("%(message)s")))
+    handler.setFormatter(MultilineFormatter(info(CONSOLE_FORMAT)))

     if json:
         handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))
diff --git a/ansiblelater/tests/unit/test_logging.py b/ansiblelater/tests/unit/test_logging.py
index fd35fa3..5a84590 100644
--- a/ansiblelater/tests/unit/test_logging.py
+++ b/ansiblelater/tests/unit/test_logging.py
@@ -1,41 +1,42 @@
+"""Test logging module."""
+
 from __future__ import print_function

 import colorama
-import logging

-from ansiblelater.utils import info, warn, abort, error, should_do_markup
+from ansiblelater import logger


-def test_abort(capsys, mocker):
-    abort('foo')
-    stdout, _ = capsys.readouterr()
+def test_critical(capsys, mocker):
+    log = logger.get_logger("test_critical")
+    log.critical("foo")
+    _, stderr = capsys.readouterr()

-    print('{}{}{}'.format(colorama.Fore.RED, 'FATAL: foo'.rstrip(),
+    print("{}{}{}".format(colorama.Fore.RED, "CRITICAL: foo".rstrip(),
                           colorama.Style.RESET_ALL))
     x, _ = capsys.readouterr()
-    assert x == stdout
+    assert x == stderr


 def test_error(capsys, mocker):
-    error('foo')
-    stdout, _ = capsys.readouterr()
+    log = logger.get_logger("test_error")
+    log.error("foo")
+    _, stderr = capsys.readouterr()

-    print('{}{}{}'.format(colorama.Fore.RED, 'ERROR: foo'.rstrip(),
print("{}{}{}".format(colorama.Fore.RED, "ERROR: foo".rstrip(), colorama.Style.RESET_ALL)) x, _ = capsys.readouterr() - assert x == stdout + assert x == stderr def test_warn(capsys, mocker): - settings = mocker.MagicMock() - settings.log_level = getattr(logging, 'WARNING') - - warn('foo', settings) + log = logger.get_logger("test_warn") + log.warning("foo") stdout, _ = capsys.readouterr() - print('{}{}{}'.format(colorama.Fore.YELLOW, 'WARN: foo'.rstrip(), + print("{}{}{}".format(colorama.Fore.YELLOW, "WARNING: foo".rstrip(), colorama.Style.RESET_ALL)) x, _ = capsys.readouterr() @@ -43,13 +44,11 @@ def test_warn(capsys, mocker): def test_info(capsys, mocker): - settings = mocker.MagicMock() - settings.log_level = getattr(logging, 'INFO') - - info('foo', settings) + log = logger.get_logger("test_info") + log.info("foo") stdout, _ = capsys.readouterr() - print('{}{}{}'.format(colorama.Fore.BLUE, 'INFO: foo'.rstrip(), + print("{}{}{}".format(colorama.Fore.BLUE, "INFO: foo".rstrip(), colorama.Style.RESET_ALL)) x, _ = capsys.readouterr() @@ -57,26 +56,26 @@ def test_info(capsys, mocker): def test_markup_detection_pycolors0(monkeypatch): - monkeypatch.setenv('PY_COLORS', '0') - assert not should_do_markup() + monkeypatch.setenv("PY_COLORS", "0") + assert not logger._should_do_markup() def test_markup_detection_pycolors1(monkeypatch): - monkeypatch.setenv('PY_COLORS', '1') - assert should_do_markup() + monkeypatch.setenv("PY_COLORS", "1") + assert logger._should_do_markup() def test_markup_detection_tty_yes(mocker): - mocker.patch('sys.stdout.isatty', return_value=True) - mocker.patch('os.environ', {'TERM': 'xterm'}) - assert should_do_markup() + mocker.patch("sys.stdout.isatty", return_value=True) + mocker.patch("os.environ", {"TERM": "xterm"}) + assert logger._should_do_markup() mocker.resetall() mocker.stopall() def test_markup_detection_tty_no(mocker): - mocker.patch('os.environ', {}) - mocker.patch('sys.stdout.isatty', return_value=False) - assert not should_do_markup() + mocker.patch("os.environ", {}) + mocker.patch("sys.stdout.isatty", return_value=False) + assert not logger._should_do_markup() mocker.resetall() mocker.stopall() diff --git a/env_27/bin/bandit b/env_27/bin/bandit new file mode 100755 index 0000000..86dab5f --- /dev/null +++ b/env_27/bin/bandit @@ -0,0 +1,10 @@ +#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from bandit.cli.main import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env_27/bin/bandit-baseline b/env_27/bin/bandit-baseline new file mode 100755 index 0000000..2acc763 --- /dev/null +++ b/env_27/bin/bandit-baseline @@ -0,0 +1,10 @@ +#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from bandit.cli.baseline import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env_27/bin/bandit-config-generator b/env_27/bin/bandit-config-generator new file mode 100755 index 0000000..a4d24f1 --- /dev/null +++ b/env_27/bin/bandit-config-generator @@ -0,0 +1,10 @@ +#!/Users/rkau2905/Devel/python/private/ansible-later/env_27/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from bandit.cli.config_generator import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git 
[The remainder of the original diff commits a local Python 2.7 virtualenv (env_27/) to the repository: pip-generated console-script wrappers (bandit, coverage, isort, pbr, py.test/pytest, pydocstyle), GitPython-2.1.11 dist-info metadata including its full RECORD hash listing, and vendored _pytest sources. This is generated third-party content unrelated to the logging changes above, and the capture breaks off mid-file; it is omitted here.]
see #1133 + p = py.path.local(entry.path) + return ( + not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR) + ) diff --git a/env_27/lib/python2.7/site-packages/_pytest/_code/source.py b/env_27/lib/python2.7/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..39701a3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/_code/source.py @@ -0,0 +1,328 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import inspect +import linecache +import sys +import textwrap +import tokenize +import warnings +from ast import PyCF_ONLY_AST as _AST_FLAG +from bisect import bisect_right + +import py +import six + + +class Source(object): + """ an immutable object holding a source code fragment, + possibly deindenting it. + """ + + _compilecounter = 0 + + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get("deindent", True) + for part in parts: + if not part: + partlines = [] + elif isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, six.string_types): + partlines = part.split("\n") + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + __hash__ = None + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __len__(self): + return len(self.lines) + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before="", after="", indent=" " * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [(indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=" " * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. 
+ """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self): + """return a new source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + from parser import suite as syntax_checker + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + # compile(source+'\n', "x", "exec") + syntax_checker(source + "\n") + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile( + self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None + ): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + "%s:%d>" % (fn, lineno) + else: + filename = base + "%r %s:%d>" % (filename, fn, lineno) + source = "\n".join(self.lines) + "\n" + try: + co = compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[: ex.lineno] + if ex.offset: + msglines.append(" " * ex.offset + "^") + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError("\n".join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + linecache.cache[filename] = (1, None, lines, filename) + return co + + +# +# public API shortcut functions +# + + +def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if isinstance(source, ast.AST): + # XXX should Source support having AST? + return compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. + If the source cannot be determined return ("", -1). + + The line number is 0-based. 
+ """ + from .code import Code + + try: + code = Code(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + + +# +# helper functions +# + + +def findsource(obj): + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getsource(obj, **kwargs): + from .code import getrawcode + + obj = getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = '"Buggy python version consider upgrading, cannot get source"' + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + + +def deindent(lines): + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno, node): + import ast + + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + values = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + # See #4260: + # don't produce duplicate warnings when compiling source to find ast + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = compile(content, "source", "exec", _AST_FLAG) + + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/env_27/lib/python2.7/site-packages/_pytest/_io/__init__.py b/env_27/lib/python2.7/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env_27/lib/python2.7/site-packages/_pytest/_io/saferepr.py b/env_27/lib/python2.7/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000..4d1d18d --- /dev/null +++ 
b/env_27/lib/python2.7/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,72 @@ +import sys + +from six.moves import reprlib + + +class SafeRepr(reprlib.Repr): + """subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. + """ + + def repr(self, x): + return self._callhelper(reprlib.Repr.repr, self, x) + + def repr_unicode(self, x, level): + # Strictly speaking wrong on narrow builds + def repr(u): + if "'" not in u: + return u"'%s'" % u + elif '"' not in u: + return u'"%s"' % u + else: + return u"'%s'" % u.replace("'", r"\'") + + s = repr(x[: self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring - 3) // 2) + j = max(0, self.maxstring - 3 - i) + s = repr(x[:i] + x[len(x) - j :]) + s = s[:i] + "..." + s[len(s) - j :] + return s + + def repr_instance(self, x, level): + return self._callhelper(repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except Exception: + cls, e, tb = sys.exc_info() + exc_name = getattr(cls, "__name__", "unknown") + try: + exc_info = str(e) + except Exception: + exc_info = "unknown" + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, + exc_info, + x.__class__.__name__, + id(x), + ) + else: + if len(s) > self.maxsize: + i = max(0, (self.maxsize - 3) // 2) + j = max(0, self.maxsize - 3 - i) + s = s[:i] + "..." + s[len(s) - j :] + return s + + +def saferepr(obj, maxsize=240): + """return a size-limited safe repr-string for the given object. + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. This function is a wrapper + around the Repr/reprlib functionality of the standard 2.6 lib. + """ + # review exception handling + srepr = SafeRepr() + srepr.maxstring = maxsize + srepr.maxsize = maxsize + srepr.maxother = 160 + return srepr.repr(obj) diff --git a/env_27/lib/python2.7/site-packages/_pytest/_version.py b/env_27/lib/python2.7/site-packages/_pytest/_version.py new file mode 100644 index 0000000..3202f53 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '4.4.0' diff --git a/env_27/lib/python2.7/site-packages/_pytest/assertion/__init__.py b/env_27/lib/python2.7/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..b5c846c --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,155 @@ +""" +support for presenting detailed information in failing assertions. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +import six + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help="""Control assertion debugging tools. 'plain' + performs no assertion debugging. 
'rewrite' + (the default) rewrites assert statements in + test modules on import to provide assert + expression information.""", + ) + + +def register_assert_rewrite(*names): + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raise TypeError: if the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + importhook = DummyRewriteHook() + importhook.mark_rewrite(*names) + + +class DummyRewriteHook(object): + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names): + pass + + +class AssertionState(object): + """State for the assertion plugin.""" + + def __init__(self, config, mode): + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook = None + + +def install_importhook(config): + """Try to install the rewrite hook, raise SystemError if it fails.""" + # Jython has an AST bug that make the assertion rewriting hook malfunction. + if sys.platform.startswith("java"): + raise SystemError("rewrite not supported") + + config._assertstate = AssertionState(config, "rewrite") + config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._assertstate.trace("installed rewrite import hook") + + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session): + # this hook is only called when test modules are collected + # so for example not in the master process of pytest-xdist + # (which does not collect test modules) + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +def pytest_runtest_setup(item): + """Setup the pytest_assertrepr_compare hook + + The newinterpret and rewrite modules will use util._reprcompare if + it exists to use custom reporting via the + pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + + def callbinrepr(op, left, right): + """Call the pytest_assertrepr_compare hook and prepare the result + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. 
+ """ + hook_result = item.ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = six.text_type("\n~").join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + + util._reprcompare = callbinrepr + + +def pytest_runtest_teardown(item): + util._reprcompare = None + + +def pytest_sessionfinish(session): + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +# Expose this plugin's implementation for the pytest_assertrepr_compare hook +pytest_assertrepr_compare = util.assertrepr_compare diff --git a/env_27/lib/python2.7/site-packages/_pytest/assertion/rewrite.py b/env_27/lib/python2.7/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000..18506d2 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1080 @@ +"""Rewrite assertion AST to produce nice error messages""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import errno +import imp +import itertools +import marshal +import os +import re +import string +import struct +import sys +import types + +import atomicwrites +import py +import six + +from _pytest._io.saferepr import saferepr +from _pytest.assertion import util +from _pytest.assertion.util import ( # noqa: F401 + format_explanation as _format_explanation, +) +from _pytest.compat import spec_from_file_location +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import PurePath + +# pytest caches rewritten pycs in __pycache__. +if hasattr(imp, "get_tag"): + PYTEST_TAG = imp.get_tag() + "-PYTEST" +else: + if hasattr(sys, "pypy_version_info"): + impl = "pypy" + elif sys.platform == "java": + impl = "jython" + else: + impl = "cpython" + ver = sys.version_info + PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) + del ver, impl + +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT + +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 + +if sys.version_info >= (3, 5): + ast_Call = ast.Call +else: + + def ast_Call(a, b, c): + return ast.Call(a, b, c, None, None) + + +class AssertionRewritingHook(object): + """PEP302 Import hook which rewrites asserts.""" + + def __init__(self, config): + self.config = config + self.fnpats = config.getini("python_files") + self.session = None + self.modules = {} + self._rewritten_names = set() + self._register_with_pkg_resources() + self._must_rewrite = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache = {} + self._session_paths_checked = False + + def set_session(self, session): + self.session = session + self._session_paths_checked = False + + def _imp_find_module(self, name, path=None): + """Indirection so we can mock calls to find_module originated from the hook during testing""" + return imp.find_module(name, path) + + def find_module(self, name, path=None): + if self._writing_pyc: + return None + state = self.config._assertstate + if self._early_rewrite_bailout(name, state): + return None + state.trace("find_module called for: %s" % name) + names = name.rsplit(".", 1) + lastname = names[-1] + pth = None + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] + if pth is None: + try: + fd, fn, desc = self._imp_find_module(lastname, path) + except ImportError: + return None + if fd is not None: + fd.close() + tp = desc[2] + if tp == imp.PY_COMPILED: + if hasattr(imp, "source_from_cache"): + try: + fn = imp.source_from_cache(fn) + except ValueError: + # Python 3 doesn't like orphaned but still-importable + # .pyc files. + fn = fn[:-1] + else: + fn = fn[:-1] + elif tp != imp.PY_SOURCE: + # Don't know what this is. + return None + else: + fn = os.path.join(pth, name.rpartition(".")[2] + ".py") + + fn_pypath = py.path.local(fn) + if not self._should_rewrite(name, fn_pypath, state): + return None + + self._rewritten_names.add(name) + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") + if write: + try: + os.mkdir(cache_dir) + except OSError: + e = sys.exc_info()[1].errno + if e == errno.EEXIST: + # Either the __pycache__ directory already exists (the + # common case) or it's blocked by a non-dir node. In the + # latter case, we'll ignore it in _write_pyc. + pass + elif e in [errno.ENOENT, errno.ENOTDIR]: + # One of the path components was not a directory, likely + # because we're in a zip file. 
+ write = False + elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: + state.trace("read only directory: %r" % fn_pypath.dirname) + write = False + else: + raise + cache_name = fn_pypath.basename[:-3] + PYC_TAIL + pyc = os.path.join(cache_dir, cache_name) + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn_pypath, pyc, state.trace) + if co is None: + state.trace("rewriting %r" % (fn,)) + source_stat, co = _rewrite_test(self.config, fn_pypath) + if co is None: + # Probably a SyntaxError in the test. + return None + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace("found cached rewritten pyc for %r" % (fn,)) + self.modules[name] = co, pyc + return self + + def _early_rewrite_bailout(self, name, state): + """ + This is a fast way to get out of rewriting modules. Profiling has + shown that the call to imp.find_module (inside of the find_module + from this class) is a major slowdown, so, this method tries to + filter what we're sure won't be rewritten before getting to it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(path).split(os.path.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. + path = PurePath(os.path.sep.join(parts) + ".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace("early skip of rewriting module: %s" % (name,)) + return True + + def _should_rewrite(self, name, fn_pypath, state): + # always rewrite conftest files + fn = str(fn_pypath) + if fn_pypath.basename == "conftest.py": + state.trace("rewriting conftest file: %r" % (fn,)) + return True + + if self.session is not None: + if self.session.isinitpath(fn): + state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + for pat in self.fnpats: + if fn_pypath.fnmatch(pat): + state.trace("matched test file %r" % (fn,)) + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name, state): + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace("matched marked file %r (from %r)" % (name, marked)) + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names): + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. 
+ """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + if not AssertionRewriter.is_rewrite_disabled( + sys.modules[name].__doc__ or "" + ): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name): + from _pytest.warning_types import PytestWarning + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured( + PytestWarning("Module already imported so cannot be rewritten: %s" % name), + self.config.hook, + stacklevel=5, + ) + + def load_module(self, name): + co, pyc = self.modules.pop(name) + if name in sys.modules: + # If there is an existing module object named 'fullname' in + # sys.modules, the loader must use that existing module. (Otherwise, + # the reload() builtin will not work correctly.) + mod = sys.modules[name] + else: + # I wish I could just call imp.load_compiled here, but __file__ has to + # be set properly. In Python 3.2+, this all would be handled correctly + # by load_compiled. + mod = sys.modules[name] = imp.new_module(name) + try: + mod.__file__ = co.co_filename + # Normally, this attribute is 3.2+. + mod.__cached__ = pyc + mod.__loader__ = self + # Normally, this attribute is 3.4+ + mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) + six.exec_(co, mod.__dict__) + except: # noqa + if name in sys.modules: + del sys.modules[name] + raise + return sys.modules[name] + + def is_package(self, name): + try: + fd, fn, desc = self._imp_find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + def get_data(self, pathname): + """Optional PEP302 get_data API. + """ + with open(pathname, "rb") as f: + return f.read() + + +def _write_pyc(state, co, source_stat, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) 
+ try: + with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: + fp.write(imp.get_magic()) + # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903) + mtime = int(source_stat.mtime) & 0xFFFFFFFF + size = source_stat.size & 0xFFFFFFFF + # ">", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", +} +# Python 3.5+ compatibility +try: + binop_map[ast.MatMult] = "@" +except AttributeError: + pass + +# Python 3.4+ compatibility +if hasattr(ast, "NameConstant"): + _NameConstant = ast.NameConstant +else: + + def _NameConstant(c): + return ast.Name(str(c), ast.Load()) + + +def set_location(node, lineno, col_offset): + """Set node location information recursively.""" + + def _fix(node, lineno, col_offset): + if "lineno" in node._attributes: + node.lineno = lineno + if "col_offset" in node._attributes: + node.col_offset = col_offset + for child in ast.iter_child_nodes(node): + _fix(child, lineno, col_offset) + + _fix(node, lineno, col_offset) + return node + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :on_failure: The AST statements which will be executed if the + assertion test fails. This is the code which will construct + the failure message and raises the AssertionError. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. 
+ + This state is reset on every new assert statement visited and used + by the other visitors. + + """ + + def __init__(self, module_path, config): + super(AssertionRewriter, self).__init__() + self.module_path = module_path + self.config = config + + def run(self, mod): + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + # Insert some special imports at the top of the module but after any + # docstrings and __future__ imports. + aliases = [ + ast.alias(six.moves.builtins.__name__, "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + lineno = 1 + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Str) + ): + doc = item.value.s + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + not isinstance(item, ast.ImportFrom) + or item.level > 0 + or item.module != "__future__" + ): + lineno = item.lineno + break + pos += 1 + else: + lineno = item.lineno + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + # Collect asserts. + nodes = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring): + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self): + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr): + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr): + """Call saferepr on the expression.""" + return self.helper("saferepr", expr) + + def helper(self, name, *args): + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, "_" + name, ast.Load()) + return ast_Call(attr, list(args), []) + + def builtin(self, name): + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr): + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self): + """Create a new formatting context. 
+ + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + + """ + self.explanation_specifiers = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr): + """Format the %-formatted string with current format context. + + The expl_expr should be an ast.Str instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .on_failure and + return the ast.Name instance of the formatted string. + + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node): + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_): + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestWarning + import warnings + + warnings.warn_explicit( + PytestWarning("assertion is always true, perhaps remove parentheses?"), + category=None, + filename=str(self.module_path), + lineno=assert_.lineno, + ) + + self.statements = [] + self.variables = [] + self.variable_counter = itertools.count() + self.stack = [] + self.on_failure = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + # If in a test module, check if directly asserting None, in order to warn [Issue #3191] + if self.module_path is not None: + self.statements.append( + self.warn_about_none_ast( + top_condition, module_path=self.module_path, lineno=assert_.lineno + ) + ) + # Create failure message. + body = self.on_failure + negation = ast.UnaryOp(ast.Not(), top_condition) + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast_Call(err_name, [fmt], []) + if sys.version_info[0] >= 3: + raise_ = ast.Raise(exc, None) + else: + raise_ = ast.Raise(exc, None, None) + body.append(raise_) + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, _NameConstant(None)) + self.statements.append(clear) + # Fix line numbers. 
+ for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def warn_about_none_ast(self, node, module_path, lineno): + """ + Returns an AST issuing a warning if the value of node is `None`. + This is used to warn the user when asserting a function that asserts + internally already. + See issue #3191 for more details. + """ + + # Using parse because it is different between py2 and py3. + AST_NONE = ast.parse("None").body[0].value + val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) + send_warning = ast.parse( + """ +from _pytest.warning_types import PytestWarning +from warnings import warn_explicit +warn_explicit( + PytestWarning('asserting the value None, please use "assert is None"'), + category=None, + filename={filename!r}, + lineno={lineno}, +) + """.format( + filename=module_path.strpath, lineno=lineno + ) + ).body + return ast.If(val_is_none, send_warning, []) + + def visit_Name(self, name): + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast_Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop): + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.on_failure + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuting if needed. 
+ for i, v in enumerate(boolop.values): + if i: + fail_inner = [] + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast_Call(app, [expl_format], []) + self.on_failure.append(ast.Expr(call)) + if i < levels: + cond = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.on_failure = fail_save + expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop): + symbol = binop_map[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call_35(self, call): + """ + visit `ast.Call` nodes on Python3.5 and after + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + def visit_Starred(self, starred): + # From Python 3.5, a Starred node can appear in a function call + res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Call_legacy(self, call): + """ + visit `ast.Call nodes on 3.4 and below` + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + new_star = new_kwarg = None + for arg in call.args: + res, expl = self.visit(arg) + new_args.append(res) + arg_expls.append(expl) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + arg_expls.append(keyword.arg + "=" + expl) + if call.starargs: + new_star, expl = self.visit(call.starargs) + arg_expls.append("*" + expl) + if call.kwargs: + new_kwarg, expl = self.visit(call.kwargs) + arg_expls.append("**" + expl) + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + # ast.Call signature changed on 3.5, + # 
conditionally change which methods is named + # visit_Call depending on Python version + if sys.version_info >= (3, 5): + visit_Call = visit_Call_35 + else: + visit_Call = visit_Call_legacy + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp): + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = "({})".format(left_expl) + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = "({})".format(next_expl) + results.append(next_res) + sym = binop_map[op.__class__] + syms.append(ast.Str(sym)) + expl = "%s %s %s" % (left_expl, sym, next_expl) + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/env_27/lib/python2.7/site-packages/_pytest/assertion/truncate.py b/env_27/lib/python2.7/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..69d6acd --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,101 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import six + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. + """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. 
+ + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = "...Full output truncated" + if truncated_line_count == 1: + msg += " ({} line hidden)".format(truncated_line_count) + else: + msg += " ({} lines hidden)".format(truncated_line_count) + msg += ", {}".format(USAGE_MSG) + truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/env_27/lib/python2.7/site-packages/_pytest/assertion/util.py b/env_27/lib/python2.7/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..ab01c31 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/assertion/util.py @@ -0,0 +1,397 @@ +"""Utilities for assertion debugging""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pprint + +import six + +import _pytest._code +from ..compat import Sequence +from _pytest import outcomes +from _pytest._io.saferepr import saferepr + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare = None + + +# the re-encoding is needed for python2 repr +# with non-ascii characters (see issue 877 and 1379) +def ecu(s): + if isinstance(s, bytes): + return s.decode("UTF-8", "replace") + else: + return s + + +def format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. 
+ """ + explanation = ecu(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u"\n".join(result) + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or u"").split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith("{"): + if stackcnt[-1]: + s = u"and " + else: + s = u"where " + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(u" +" + u" " * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ["~", ">"] + stack[-1] += 1 + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(u" " * indent + line[1:]) + assert len(stack) == 1 + return result + + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def issequence(x): + return isinstance(x, Sequence) and not isinstance(x, basestring) + + +def istext(x): + return isinstance(x, basestring) + + +def isdict(x): + return isinstance(x, dict) + + +def isset(x): + return isinstance(x, (set, frozenset)) + + +def isdatacls(obj): + return getattr(obj, "__dataclass_fields__", None) is not None + + +def isattrs(obj): + return getattr(obj, "__attrs_attrs__", None) is not None + + +def isiterable(obj): + try: + iter(obj) + return not istext(obj) + except TypeError: + return False + + +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = saferepr(left, maxsize=int(width // 2)) + right_repr = saferepr(right, maxsize=width - len(left_repr)) + + summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr)) + + verbose = config.getoption("verbose") + explanation = None + try: + if op == "==": + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + if issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + elif type(left) == type(right) and (isdatacls(left) or isattrs(left)): + type_fn = (isdatacls, isattrs) + explanation = _compare_eq_cls(left, right, verbose, type_fn) + elif verbose > 0: + explanation = _compare_eq_verbose(left, right) + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, verbose) + if explanation is not None: + explanation.extend(expl) + else: + explanation = expl + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + 
except outcomes.Exit: + raise + except Exception: + explanation = [ + u"(pytest_assertion plugin: representation of details failed. " + u"Probably an object has a faulty __repr__.)", + six.text_type(_pytest._code.ExceptionInfo.from_current()), + ] + + if not explanation: + return None + + return [summary] + explanation + + +def _diff_text(left, right, verbose=0): + """Return the explanation for the diff between text or bytes. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. + """ + from difflib import ndiff + + explanation = [] + + def escape_for_readable_diff(binary_text): + """ + Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode. + This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape + newlines and carriage returns (#429). + """ + r = six.text_type(repr(binary_text)[1:-1]) + r = r.replace(r"\n", "\n") + r = r.replace(r"\r", "\r") + return r + + if isinstance(left, bytes): + left = escape_for_readable_diff(left) + if isinstance(right, bytes): + right = escape_for_readable_diff(right) + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + u"Skipping %s identical leading characters in diff, use -v to show" % i + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + u"Skipping {} identical trailing " + u"characters in diff, use -v to show".format(i) + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += [u"Strings contain only whitespace, escaping them using repr()"] + explanation += [ + line.strip("\n") + for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) + ] + return explanation + + +def _compare_eq_verbose(left, right): + keepends = True + left_lines = repr(left).splitlines(keepends) + right_lines = repr(right).splitlines(keepends) + + explanation = [] + explanation += [u"-" + line for line in left_lines] + explanation += [u"+" + line for line in right_lines] + + return explanation + + +def _compare_eq_iterable(left, right, verbose=0): + if not verbose: + return [u"Use -v to get the full diff"] + # dynamic import to speedup pytest + import difflib + + try: + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + explanation = [u"Full diff:"] + except Exception: + # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling + # sorted() on a list would raise. See issue #718. + # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
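The workaround described in that comment (and implemented in the lines that follow) can be exercised on its own: diff the sorted ``repr()`` of each item instead of pretty-printing the containers. A minimal sketch with made-up inputs:

    import difflib

    def repr_diff(left, right):
        # Diff containers item by item via repr(), sidestepping
        # pprint.pformat(), which can raise on unsortable items on py2.
        left_lines = sorted(repr(x) for x in left)
        right_lines = sorted(repr(x) for x in right)
        return list(difflib.ndiff(left_lines, right_lines))

    for line in repr_diff([1, "a"], [1, "b"]):
        print(line)
    # - 'a'
    # + 'b'
    #   1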
+ left_formatting = sorted(repr(x) for x in left) + right_formatting = sorted(repr(x) for x in right) + explanation = [u"Full diff (fallback to calling repr on each item):"] + explanation.extend( + line.strip() for line in difflib.ndiff(left_formatting, right_formatting) + ) + return explanation + + +def _compare_eq_sequence(left, right, verbose=0): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += [ + u"Left contains more items, first extra item: %s" + % saferepr(left[len(right)]) + ] + elif len(left) < len(right): + explanation += [ + u"Right contains more items, first extra item: %s" + % saferepr(right[len(left)]) + ] + return explanation + + +def _compare_eq_set(left, right, verbose=0): + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append(u"Extra items in the left set:") + for item in diff_left: + explanation.append(saferepr(item)) + if diff_right: + explanation.append(u"Extra items in the right set:") + for item in diff_right: + explanation.append(saferepr(item)) + return explanation + + +def _compare_eq_dict(left, right, verbose=0): + explanation = [] + common = set(left).intersection(set(right)) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [u"Omitting %s identical items, use -vv to show" % len(same)] + elif same: + explanation += [u"Common items:"] + explanation += pprint.pformat(same).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += [u"Differing items:"] + for k in diff: + explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u"Left contains more items:") + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u"Right contains more items:") + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) + return explanation + + +def _compare_eq_cls(left, right, verbose, type_fns): + isdatacls, isattrs = type_fns + if isdatacls(left): + all_fields = left.__dataclass_fields__ + fields_to_check = [field for field, info in all_fields.items() if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if field.cmp] + + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same and verbose < 2: + explanation.append(u"Omitting %s identical items, use -vv to show" % len(same)) + elif same: + explanation += [u"Matching attributes:"] + explanation += pprint.pformat(same).splitlines() + if diff: + explanation += [u"Differing attributes:"] + for field in diff: + explanation += [ + (u"%s: %r != %r") % (field, getattr(left, field), getattr(right, field)) + ] + return explanation + + +def _notin_text(term, text, verbose=0): + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(correct_text, text, verbose) + newdiff = [u"%s is contained here:" % saferepr(term, maxsize=42)] + for line in diff: + if line.startswith(u"Skipping"): + continue + if line.startswith(u"- "): + 
continue + if line.startswith(u"+ "): + newdiff.append(u" " + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/env_27/lib/python2.7/site-packages/_pytest/cacheprovider.py b/env_27/lib/python2.7/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000..ef20395 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,387 @@ +""" +merged implementation of the cache provider + +the name cache was not chosen to ensure pluggy automatically +ignores the external pytest-cache +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +from collections import OrderedDict + +import attr +import py +import six + +import pytest +from .compat import _PY2 as PY2 +from .pathlib import Path +from .pathlib import resolve_from_str +from .pathlib import rmtree + +README_CONTENT = u"""\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# http://www.bford.info/cachedir/spec.html +""" + + +@attr.s +class Cache(object): + _cachedir = attr.ib(repr=False) + _config = attr.ib(repr=False) + + @classmethod + def for_config(cls, config): + cachedir = cls.cache_dir_from_config(config) + if config.getoption("cacheclear") and cachedir.exists(): + rmtree(cachedir, force=True) + cachedir.mkdir() + return cls(cachedir, config) + + @staticmethod + def cache_dir_from_config(config): + return resolve_from_str(config.getini("cache_dir"), config.rootdir) + + def warn(self, fmt, **args): + from _pytest.warnings import _issue_warning_captured + from _pytest.warning_types import PytestWarning + + _issue_warning_captured( + PytestWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def makedir(self, name): + """ return a directory path object with the given name. If the + directory does not yet exist, it will be created. You can use it + to manage files likes e. g. store/retrieve database + dumps across test sessions. + + :param name: must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + name = Path(name) + if len(name.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath("d", name) + res.mkdir(exist_ok=True, parents=True) + return py.path.local(res) + + def _getvaluepath(self, key): + return self._cachedir.joinpath("v", Path(key)) + + def get(self, key, default): + """ return cached value for the given key. If no value + was yet cached or the value cannot be read, the specified + default is returned. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: must be provided in case of a cache-miss or + invalid cache values. + + """ + path = self._getvaluepath(key) + try: + with path.open("r") as f: + return json.load(f) + except (ValueError, IOError, OSError): + return default + + def set(self, key, value): + """ save value for the given key. 
+ + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: must be of any combination of basic + python types, including nested types + like e. g. lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + if path.parent.is_dir(): + cache_dir_exists_already = True + else: + cache_dir_exists_already = self._cachedir.exists() + path.parent.mkdir(exist_ok=True, parents=True) + except (IOError, OSError): + self.warn("could not create cache path {path}", path=path) + return + if not cache_dir_exists_already: + self._ensure_supporting_files() + try: + f = path.open("wb" if PY2 else "w") + except (IOError, OSError): + self.warn("cache could not write path {path}", path=path) + else: + with f: + json.dump(value, f, indent=2, sort_keys=True) + + def _ensure_supporting_files(self): + """Create supporting files in the cache dir that are not really part of the cache.""" + readme_path = self._cachedir / "README.md" + readme_path.write_text(README_CONTENT) + + gitignore_path = self._cachedir.joinpath(".gitignore") + msg = u"# Created by pytest automatically.\n*" + gitignore_path.write_text(msg, encoding="UTF-8") + + cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") + cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) + + +class LFPlugin(object): + """ Plugin which implements the --lf (run last-failing) option """ + + def __init__(self, config): + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + self.lastfailed = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count = None + self._no_failures_behavior = self.config.getoption("last_failed_no_failures") + + def pytest_report_collectionfinish(self): + if self.active and self.config.getoption("verbose") >= 0: + if not self._previously_failed_count: + return None + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + mode = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + return "run-last-failure: %s" % mode + + def pytest_runtest_logreport(self, report): + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report): + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + if not previously_failed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + return + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed + elif self._no_failures_behavior == "none": + config.hook.pytest_deselected(items=items) + items[:] = [] + + def 
pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin(object): + """ Plugin which implements the --nf (run new-first) option """ + + def __init__(self, config): + self.config = config + self.active = config.option.newfirst + self.cached_nodeids = config.cache.get("cache/nodeids", []) + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + new_items = OrderedDict() + other_items = OrderedDict() + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + six.itervalues(new_items) + ) + self._get_increasing_order(six.itervalues(other_items)) + self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] + + def _get_increasing_order(self, items): + return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + config.cache.set("cache/nodeids", self.cached_nodeids) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="run all tests but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="store_true", + dest="cacheshow", + help="show cache contents, don't perform collection or tests", + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="change the behavior when no test failed in the last run or no " + "information about the last failures was found in the cache", + ) + + +def pytest_cmdline_main(config): + if config.option.cacheshow: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): + config.cache = Cache.for_config(config) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@pytest.fixture +def cache(request): + """ + Return a cache object that can persist state between testing sessions. 
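In a test suite this fixture is typically used as a simple memo store, following the key conventions spelled out just below; an illustrative test, not part of this diff:

    def test_expensive_answer_is_memoized(cache):
        # Keys are '/'-separated; prefix them with your plugin or app name.
        answer = cache.get("example/answer", None)
        if answer is None:
            answer = 42  # stand-in for an expensive computation
            cache.set("example/answer", answer)
        assert answer == 42

The cache options registered above are usually driven from the command line; the same flags can also be passed to ``pytest.main`` in process (target paths are illustrative):

    import pytest

    pytest.main(["--cache-show"])             # inspect cache, run nothing
    pytest.main(["--cache-clear", "tests/"])  # start from a clean slate
    pytest.main(["--lf", "tests/"])           # rerun only last failures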
+ + cache.get(key, default) + cache.set(key, value) + + Keys must be a ``/`` separated value, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + return request.config.cache + + +def pytest_report_header(config): + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. if sensible + + try: + displaypath = cachedir.relative_to(config.rootdir) + except ValueError: + displaypath = cachedir + return "cachedir: {}".format(displaypath) + + +def cacheshow(config, session): + from pprint import pformat + + tw = py.io.TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / "v" + tw.sep("-", "cache values") + for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()): + key = valpath.relative_to(vdir) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / "d" + if ddir.is_dir(): + contents = sorted(ddir.rglob("*")) + tw.sep("-", "cache directories") + for p in contents: + # if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.is_file(): + key = p.relative_to(basedir) + tw.line("{} is a file of length {:d}".format(key, p.stat().st_size)) + return 0 diff --git a/env_27/lib/python2.7/site-packages/_pytest/capture.py b/env_27/lib/python2.7/site-packages/_pytest/capture.py new file mode 100644 index 0000000..0e8a693 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/capture.py @@ -0,0 +1,822 @@ +""" +per-test stdout/stderr capturing mechanism. 
+ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import io +import os +import sys +from io import UnsupportedOperation +from tempfile import TemporaryFile + +import six + +import pytest +from _pytest.compat import _PY3 +from _pytest.compat import CaptureIO + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd" if hasattr(os, "dup") else "sys", + metavar="method", + choices=["fd", "sys", "no"], + help="per-test capturing method: one of fd|sys|no.", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config, parser, args): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + early_config.add_cleanup(capman.stop_global_capturing) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + + early_config.add_cleanup(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +class CaptureManager(object): + """ + Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each + test phase (setup, call, teardown). After each of those points, the captured output is obtained and + attached to the collection/runtest report. + + There are two levels of capture: + * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled + during collection and each test phase. + * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this + case special handling is needed to ensure the fixtures take precedence over the global capture. 
+ """ + + def __init__(self, method): + self._method = method + self._global_capturing = None + self._current_item = None + + def __repr__(self): + return "" % ( + self._method, + self._global_capturing, + self._current_item, + ) + + def _getcapture(self, method): + if method == "fd": + return MultiCapture(out=True, err=True, Capture=FDCapture) + elif method == "sys": + return MultiCapture(out=True, err=True, Capture=SysCapture) + elif method == "no": + return MultiCapture(out=False, err=False, in_=False) + raise ValueError("unknown capturing method: %r" % method) # pragma: no cover + + def is_capturing(self): + if self.is_globally_capturing(): + return "global" + capture_fixture = getattr(self._current_item, "_capture_fixture", None) + if capture_fixture is not None: + return ( + "fixture %s" % self._current_item._capture_fixture.request.fixturename + ) + return False + + # Global capturing control + + def is_globally_capturing(self): + return self._method != "no" + + def start_global_capturing(self): + assert self._global_capturing is None + self._global_capturing = self._getcapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self): + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self): + # During teardown of the python process, and on rare occasions, capture + # attributes can be `None` while trying to resume global capture. + if self._global_capturing is not None: + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_=False): + cap = getattr(self, "_global_capturing", None) + if cap is not None: + cap.suspend_capturing(in_=in_) + + def suspend(self, in_=False): + # Need to undo local capsys-et-al if it exists before disabling global capture. + self.suspend_fixture(self._current_item) + self.suspend_global_capture(in_) + + def resume(self): + self.resume_global_capture() + self.resume_fixture(self._current_item) + + def read_global_capture(self): + return self._global_capturing.readouterr() + + # Fixture Control (it's just forwarding, think about removing this later) + + def activate_fixture(self, item): + """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over + the global capture. 
+ """ + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._start() + + def deactivate_fixture(self, item): + """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture.close() + + def suspend_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._suspend() + + def resume_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self): + """Context manager to temporarily disable global and current fixture capturing.""" + self.suspend() + try: + yield + finally: + self.resume() + + @contextlib.contextmanager + def item_capture(self, when, item): + self.resume_global_capture() + self.activate_fixture(item) + try: + yield + finally: + self.deactivate_fixture(item) + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @pytest.hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector): + if isinstance(collector, pytest.File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_protocol(self, item): + self._current_item = item + yield + self._current_item = None + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self.item_capture("setup", item): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self.item_capture("call", item): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self.item_capture("teardown", item): + yield + + @pytest.hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self, excinfo): + self.stop_global_capturing() + + @pytest.hookimpl(tryfirst=True) + def pytest_internalerror(self, excinfo): + self.stop_global_capturing() + + +capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} + + +def _ensure_only_one_capture_fixture(request, name): + fixtures = set(request.fixturenames) & capture_fixtures - {name} + if fixtures: + fixtures = sorted(fixtures) + fixtures = fixtures[0] if len(fixtures) == 1 else fixtures + raise request.raiseerror( + "cannot use {} and {} at the same time".format(fixtures, name) + ) + + +@pytest.fixture +def capsys(request): + """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + _ensure_only_one_capture_fixture(request, "capsys") + with _install_capture_fixture_on_item(request, SysCapture) as fixture: + yield fixture + + +@pytest.fixture +def capsysbinary(request): + """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. 
+ """ + _ensure_only_one_capture_fixture(request, "capsysbinary") + # Currently, the implementation uses the python3 specific `.buffer` + # property of CaptureIO. + if sys.version_info < (3,): + raise request.raiseerror("capsysbinary is only supported on Python 3") + with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: + yield fixture + + +@pytest.fixture +def capfd(request): + """Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + """ + _ensure_only_one_capture_fixture(request, "capfd") + if not hasattr(os, "dup"): + pytest.skip( + "capfd fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCapture) as fixture: + yield fixture + + +@pytest.fixture +def capfdbinary(request): + """Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``byte`` objects. + """ + _ensure_only_one_capture_fixture(request, "capfdbinary") + if not hasattr(os, "dup"): + pytest.skip( + "capfdbinary fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: + yield fixture + + +@contextlib.contextmanager +def _install_capture_fixture_on_item(request, capture_class): + """ + Context manager which creates a ``CaptureFixture`` instance and "installs" it on + the item/node of the given request. Used by ``capsys`` and ``capfd``. + + The CaptureFixture is added as attribute of the item because it needs to accessed + by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. + """ + request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) + capmanager = request.config.pluginmanager.getplugin("capturemanager") + # Need to active this fixture right away in case it is being used by another fixture (setup phase). + # If this fixture is being used only by a test function (call phase), then we wouldn't need this + # activation, but it doesn't hurt. + capmanager.activate_fixture(request.node) + yield fixture + fixture.close() + del request.node._capture_fixture + + +class CaptureFixture(object): + """ + Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` + fixtures. + """ + + def __init__(self, captureclass, request): + self.captureclass = captureclass + self.request = request + self._capture = None + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + + def _start(self): + # Start if not started yet + if getattr(self, "_capture", None) is None: + self._capture = MultiCapture( + out=True, err=True, in_=False, Capture=self.captureclass + ) + self._capture.start_capturing() + + def close(self): + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self): + """Read and return the captured output so far, resetting the internal buffer. 
+ + :return: captured content as a namedtuple with ``out`` and ``err`` string attributes + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self): + """Suspends this fixture's own capturing temporarily.""" + self._capture.suspend_capturing() + + def _resume(self): + """Resumes this fixture's own capturing temporarily.""" + self._capture.resume_capturing() + + @contextlib.contextmanager + def disabled(self): + """Temporarily disables capture while inside the 'with' block.""" + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") + with capmanager.global_and_fixture_disabled(): + yield + + +def safe_text_dupfile(f, mode, default_encoding="UTF8"): + """ return an open text file object that's a duplicate of f on the + FD-level if possible. + """ + encoding = getattr(f, "encoding", None) + try: + fd = f.fileno() + except Exception: + if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): + # we seem to have a text stream, let's just use it + return f + else: + newfd = os.dup(fd) + if "b" not in mode: + mode += "b" + f = os.fdopen(newfd, mode, 0) # no buffering + return EncodedFile(f, encoding or default_encoding) + + +class EncodedFile(object): + errors = "strict" # possibly needed by py3 code (issue555) + + def __init__(self, buffer, encoding): + self.buffer = buffer + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, six.text_type): + obj = obj.encode(self.encoding, "replace") + elif _PY3: + raise TypeError( + "write() argument must be str, not {}".format(type(obj).__name__) + ) + self.buffer.write(obj) + + def writelines(self, linelist): + data = "".join(linelist) + self.write(data) + + @property + def name(self): + """Ensure that file.name is a string.""" + return repr(self.buffer) + + def __getattr__(self, name): + return getattr(object.__getattribute__(self, "buffer"), name) + + +CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) + + +class MultiCapture(object): + out = err = in_ = None + + def __init__(self, out=True, err=True, in_=True, Capture=None): + if in_: + self.in_ = Capture(0) + if out: + self.out = Capture(1) + if err: + self.err = Capture(2) + + def __repr__(self): + return "<MultiCapture out=%r err=%r in_=%r>" % (self.out, self.err, self.in_) + + def start_capturing(self): + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self): + """ pop current snapshot out/err capture and flush to orig streams.
""" + out, err = self.readouterr() + if out: + self.out.writeorg(out) + if err: + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_=False): + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self): + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if hasattr(self, "_in_suspended"): + self.in_.resume() + del self._in_suspended + + def stop_capturing(self): + """ stop capturing and reset capturing streams """ + if hasattr(self, "_reset"): + raise ValueError("was already stopped") + self._reset = True + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def readouterr(self): + """ return snapshot unicode value of stdout/stderr capturings. """ + return CaptureResult( + self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "", + ) + + +class NoCapture(object): + EMPTY_BUFFER = None + __init__ = start = done = suspend = resume = lambda *args: None + + +class FDCaptureBinary(object): + """Capture IO to/from a given os-level filedescriptor. + + snap() produces `bytes` + """ + + EMPTY_BUFFER = b"" + + def __init__(self, targetfd, tmpfile=None): + self.targetfd = targetfd + try: + self.targetfd_save = os.dup(self.targetfd) + except OSError: + self.start = lambda: None + self.done = lambda: None + else: + if targetfd == 0: + assert not tmpfile, "cannot set tmpfile with stdin" + tmpfile = open(os.devnull, "r") + self.syscapture = SysCapture(targetfd) + else: + if tmpfile is None: + f = TemporaryFile() + with f: + tmpfile = safe_text_dupfile(f, mode="wb+") + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, tmpfile) + else: + self.syscapture = NoCapture() + self.tmpfile = tmpfile + self.tmpfile_fd = tmpfile.fileno() + + def __repr__(self): + return "" % ( + self.targetfd, + getattr(self, "targetfd_save", None), + ) + + def start(self): + """ Start capturing on targetfd using memorized tmpfile. """ + try: + os.fstat(self.targetfd_save) + except (AttributeError, OSError): + raise ValueError("saved filedescriptor not valid anymore") + os.dup2(self.tmpfile_fd, self.targetfd) + self.syscapture.start() + + def snap(self): + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + """ stop capturing, restore streams, return original capture file, + seeked to position zero. """ + targetfd_save = self.__dict__.pop("targetfd_save") + os.dup2(targetfd_save, self.targetfd) + os.close(targetfd_save) + self.syscapture.done() + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + + def resume(self): + self.syscapture.resume() + os.dup2(self.tmpfile_fd, self.targetfd) + + def writeorg(self, data): + """ write to original file descriptor. """ + if isinstance(data, six.text_type): + data = data.encode("utf8") # XXX use encoding of original stream + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBinary): + """Capture IO to/from a given os-level filedescriptor. 
+ + snap() produces text + """ + + EMPTY_BUFFER = str() + + def snap(self): + res = super(FDCapture, self).snap() + enc = getattr(self.tmpfile, "encoding", None) + if enc and isinstance(res, bytes): + res = six.text_type(res, enc, "replace") + return res + + +class SysCapture(object): + + EMPTY_BUFFER = str() + + def __init__(self, fd, tmpfile=None): + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() + self.tmpfile = tmpfile + + def start(self): + setattr(sys, self.name, self.tmpfile) + + def snap(self): + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + setattr(sys, self.name, self._old) + del self._old + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + setattr(sys, self.name, self._old) + + def resume(self): + setattr(sys, self.name, self.tmpfile) + + def writeorg(self, data): + self._old.write(data) + self._old.flush() + + +class SysCaptureBinary(SysCapture): + EMPTY_BUFFER = b"" + + def snap(self): + res = self.tmpfile.buffer.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + +class DontReadFromInput(six.Iterator): + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + + encoding = None + + def read(self, *args): + raise IOError("reading from stdin while output is captured") + + readline = read + readlines = read + __next__ = read + + def __iter__(self): + return self + + def fileno(self): + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass + + @property + def buffer(self): + if sys.version_info >= (3, 0): + return self + else: + raise AttributeError("redirected stdin has no attribute buffer") + + +def _colorama_workaround(): + """ + Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _readline_workaround(): + """ + Ensure readline is imported so that it attaches to the correct stdio + handles on Windows. + + Pdb uses readline support where available--when not running from the Python + prompt, the readline module is not imported until running the pdb REPL. If + running pytest with the --pdb option this means the readline module is not + imported until after I/O capture has been started. + + This is a problem for pyreadline, which is often used to implement readline + support on Windows, as it does not attach to the correct handles for stdout + and/or stdin if they have been redirected by the FDCapture mechanism. This + workaround ensures that readline is imported before I/O capture is setup so + that it can attach to the actual stdin/out for the console. 
+ + See https://github.com/pytest-dev/pytest/pull/1281 + """ + if sys.platform.startswith("win32"): + try: + import readline # noqa: F401 + except ImportError: + pass + + +def _py36_windowsconsoleio_workaround(stream): + """ + Python 3.6 implemented unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103 + """ + if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): + return + + # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) + if not hasattr(stream, "buffer"): + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +def _attempt_to_close_capture_file(f): + """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" + if six.PY2: + try: + f.close() + except IOError: + pass + else: + f.close() diff --git a/env_27/lib/python2.7/site-packages/_pytest/compat.py b/env_27/lib/python2.7/site-packages/_pytest/compat.py new file mode 100644 index 0000000..fa878a4 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/compat.py @@ -0,0 +1,457 @@ +""" +python version compatibility code +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import functools +import inspect +import re +import sys +from contextlib import contextmanager + +import py +import six +from six import text_type + +import _pytest +from _pytest._io.saferepr import saferepr +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME + +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + +_PY3 = sys.version_info > (3, 0) +_PY2 = not _PY3 + + +if _PY3: + from inspect import signature, Parameter as Parameter +else: + from funcsigs import signature, Parameter as Parameter + +NoneType = type(None) +NOTSET = object() + +PY35 = sys.version_info[:2] >= (3, 5) +PY36 = sys.version_info[:2] >= (3, 6) +MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" + + +if _PY3: + from collections.abc import MutableMapping as MappingMixin + from collections.abc import Iterable, Mapping, Sequence, Sized +else: + # those raise 
DeprecationWarnings in Python >=3.7 + from collections import MutableMapping as MappingMixin # noqa + from collections import Iterable, Mapping, Sequence, Sized # noqa + + +if sys.version_info >= (3, 4): + from importlib.util import spec_from_file_location +else: + + def spec_from_file_location(*_, **__): + return None + + +def _format_args(func): + return str(signature(func)) + + +isfunction = inspect.isfunction +isclass = inspect.isclass +# used to work around a python2 exception info leak +exc_clear = getattr(sys, "exc_clear", lambda: None) +# The type of re.compile objects is not exposed in Python. +REGEX_TYPE = type(re.compile("")) + + +def is_generator(func): + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, + which in turns also initializes the "logging" module as side-effect (see issue #8). + """ + return getattr(func, "_is_coroutine", False) or ( + hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func) + ) + + +def getlocation(function, curdir): + function = get_real_func(function) + fn = py.path.local(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function): + """ return number of arguments used up by mock arguments (if any) """ + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] + if any(mock_modules): + sentinels = [m.DEFAULT for m in mock_modules if m is not None] + return len( + [p for p in patchings if not p.attribute_name and p.new in sentinels] + ) + return len(patchings) + + +def getfuncargnames(function, is_method=False, cls=None): + """Returns the names of a function's mandatory arguments. + + This should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. + + @RonnyPfannschmidt: This function should be refactored when we + revisit fixtures. The fixture mechanism should ask the node for + the fixture names, and not try to obtain directly from the + function object well after collection has occurred. + + """ + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + fail( + "Could not determine arguments of {!r}: {}".format(function, e), + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. 
+ if is_method or ( + cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +@contextmanager +def dummy_context_manager(): + """Context manager that does nothing, useful in situations where you might need an actual context manager or not + depending on some condition. Using this allow to keep the same code""" + yield + + +def get_default_arg_names(function): + # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, + # to get the arguments which were excluded from its result because they had default values + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): u"\\t", ord("\r"): u"\\r", ord("\n"): u"\\n"} +) + + +def _translate_non_printable(s): + return s.translate(_non_printable_ascii_translate_table) + + +if _PY3: + STRING_TYPES = bytes, str + UNICODE_TYPES = six.text_type + + if PY35: + + def _bytes_to_ascii(val): + return val.decode("ascii", "backslashreplace") + + else: + + def _bytes_to_ascii(val): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode("ascii") + else: + # empty bytes crashes codecs.escape_encode (#1087) + return "" + + def ascii_escaped(val): + """If val is pure ascii, returns it as a str(). Otherwise, escapes + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' + + note: + the obvious "v.decode('unicode-escape')" will return + valid utf-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a utf-8 string. + + """ + if isinstance(val, bytes): + ret = _bytes_to_ascii(val) + else: + ret = val.encode("unicode_escape").decode("ascii") + return _translate_non_printable(ret) + + +else: + STRING_TYPES = six.string_types + UNICODE_TYPES = six.text_type + + def ascii_escaped(val): + """In py2 bytes and str are the same type, so return if it's a bytes + object, return it unchanged if it is a full ascii string, + otherwise escape it into its binary form. + + If it's a unicode string, change the unicode characters into + unicode escapes. + + """ + if isinstance(val, bytes): + try: + ret = val.decode("ascii") + except UnicodeDecodeError: + ret = val.encode("string-escape").decode("ascii") + else: + ret = val.encode("unicode-escape").decode("ascii") + return _translate_non_printable(ret) + + +class _PytestWrapper(object): + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object + when we are creating fixtures, because we wrap the function object ourselves with a decorator + to issue warnings when the fixture function is called directly. + """ + + def __init__(self, obj): + self.obj = obj + + +def get_real_func(obj): + """ gets the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial. 
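The unwrap loop that follows walks the ``__wrapped__`` attribute that ``functools.wraps`` records on Python 3 (plus pytest's own ``__pytest_wrapped__`` marker). A toy demonstration of the chain it unwinds:

    import functools

    def passthrough(fn):
        @functools.wraps(fn)  # records wrapper.__wrapped__ = fn
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @passthrough
    @passthrough
    def target():
        return "real"

    fn = target
    while hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__  # walk back to the original function

    assert not hasattr(fn, "__wrapped__") and fn() == "real"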
+ """ + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=saferepr(start_obj), current=saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """ + Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time + returning a bound method to ``holder`` if the original object was a bound method. + """ + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getfslineno(obj): + # xxx let decorators etc specify a sane ordering + obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + fslineno = _pytest._code.getfslineno(obj) + assert isinstance(fslineno[1], int), obj + return fslineno + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException + instead of Exception (for more details check #2707) + """ + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj): + """Ignore any exception via isinstance on Python 3.""" + try: + return isclass(obj) + except Exception: + return False + + +def _is_unittest_unexpected_success_a_failure(): + """Return if the test suite should fail if an @expectedFailure unittest test PASSES. + + From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: + Changed in version 3.4: Returns False if there were any + unexpectedSuccesses from tests marked with the expectedFailure() decorator. + """ + return sys.version_info >= (3, 4) + + +if _PY3: + + def safe_str(v): + """returns v as string""" + return str(v) + + +else: + + def safe_str(v): + """returns v as string, converting to ascii if necessary""" + try: + return str(v) + except UnicodeError: + if not isinstance(v, text_type): + v = text_type(v) + errors = "replace" + return v.encode("utf-8", errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + "Collector", + "Module", + "Function", + "Instance", + "Session", + "Item", + "Class", + "File", + "_fillfuncargs", +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + + pytest.collect = ModuleType("pytest.collect") + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. 
+ from py.io import TextIO + + class CaptureIO(TextIO): + @property + def encoding(self): + return getattr(self, "_encoding", "UTF-8") + + +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), encoding="UTF-8", newline="", write_through=True + ) + + def getvalue(self): + return self.buffer.getvalue().decode("UTF-8") + + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames + + +if six.PY2: + + def lru_cache(*_, **__): + def dec(fn): + return fn + + return dec + + +else: + from functools import lru_cache # noqa: F401 diff --git a/env_27/lib/python2.7/site-packages/_pytest/config/__init__.py b/env_27/lib/python2.7/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..4ed9dea --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1102 @@ +""" command line options, ini-file and conftest.py processing. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import copy +import inspect +import os +import shlex +import sys +import types +import warnings + +import py +import six +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager + +import _pytest._code +import _pytest.assertion +import _pytest.hookspec # the extension point definitions +from .exceptions import PrintHelp +from .exceptions import UsageError +from .findpaths import determine_setup +from .findpaths import exists +from _pytest import deprecated +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest.compat import lru_cache +from _pytest.compat import safe_str +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.warning_types import PytestWarning + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +class ConftestImportFailure(Exception): + def __init__(self, path, excinfo): + Exception.__init__(self, path, excinfo) + self.path = path + self.excinfo = excinfo + + +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
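+
+    Illustrative in-process invocation (argument values are examples only)::
+
+        import sys
+
+        import pytest
+
+        sys.exit(pytest.main(["-x", "tests/"]))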
+ """ + from _pytest.main import EXIT_USAGEERROR + + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo(e.excinfo) + tw = py.io.TerminalWriter(sys.stderr) + tw.line( + "ImportError while loading conftest '{e.path}'.".format(e=e), red=True + ) + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return 4 + else: + try: + return config.hook.pytest_cmdline_main(config=config) + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = py.io.TerminalWriter(sys.stderr) + for msg in e.args: + tw.line("ERROR: {}\n".format(msg), red=True) + return EXIT_USAGEERROR + + +class cmdline(object): # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path, optname): + """ Argparse type validator for filename arguments. + + :path: path of filename + :optname: name of the option + """ + if os.path.isdir(path): + raise UsageError("{} must be a filename, given: {}".format(optname, path)) + return path + + +def directory_arg(path, optname): + """Argparse type validator for directory arguments. + + :path: path of directory + :optname: name of the option + """ + if not os.path.isdir(path): + raise UsageError("{} must be a directory, given: {}".format(optname, path)) + return path + + +default_plugins = ( + "mark", + "main", + "terminal", + "runner", + "python", + "fixtures", + "debugging", + "unittest", + "capture", + "skipping", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "helpconfig", + "nose", + "assertion", + "junitxml", + "resultlog", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", +) + + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") + + +def get_config(args=None): + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config(pluginmanager) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return config + + +def get_plugin_manager(): + """ + Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. 
+ """ + return get_config().pluginmanager + + +def _prepareconfig(args=None, plugins=None): + warning = None + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + msg = "`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})" + raise TypeError(msg.format(args, type(args))) + + config = get_config(args) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, six.string_types): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + if warning: + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured(warning, hook=config.hook, stacklevel=4) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + except BaseException: + config._ensure_unconfigure() + raise + + +class PytestPluginManager(PluginManager): + """ + Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific + functionality: + + * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded; + * ``conftest.py`` loading during start-up; + """ + + def __init__(self): + super(PytestPluginManager, self).__init__("pytest") + self._conftest_plugins = set() + + # state related to local conftest plugins + self._dirpath2confmods = {} + self._conftestpath2mod = {} + self._confcutdir = None + self._noconftest = False + self._duplicatepaths = set() + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err = sys.stderr + encoding = getattr(err, "encoding", "utf8") + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage + self._configured = False + + def addhooks(self, module_or_class): + """ + .. deprecated:: 2.8 + + Use :py:meth:`pluggy.PluginManager.add_hookspecs ` + instead. 
+ """ + warnings.warn(deprecated.PLUGIN_MANAGER_ADDHOOKS, stacklevel=2) + return self.add_hookspecs(module_or_class) + + def parse_hookimpl_opts(self, plugin, name): + # pytest hooks are always prefixed with pytest_ + # so we avoid accessing possibly non-readable attributes + # (see issue #1073) + if not name.startswith("pytest_"): + return + # ignore names which can not be hooks + if name == "pytest_plugins": + return + + method = getattr(plugin, name) + opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + + # consider only actual functions for hooks (#3775) + if not inspect.isroutine(method): + return + + # collect unmarked hooks as long as they have the `pytest_' prefix + if opts is None and name.startswith("pytest_"): + opts = {} + if opts is not None: + # TODO: DeprecationWarning, people should use hookimpl + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + + opts.setdefault(name, hasattr(method, name) or name in known_marks) + return opts + + def parse_hookspec_opts(self, module_or_class, name): + opts = super(PytestPluginManager, self).parse_hookspec_opts( + module_or_class, name + ) + if opts is None: + method = getattr(module_or_class, name) + + if name.startswith("pytest_"): + # todo: deprecate hookspec hacks + # https://github.com/pytest-dev/pytest/issues/4562 + known_marks = {m.name for m in getattr(method, "pytestmark", [])} + opts = { + "firstresult": hasattr(method, "firstresult") + or "firstresult" in known_marks, + "historic": hasattr(method, "historic") + or "historic" in known_marks, + } + return opts + + def register(self, plugin, name=None): + if name in ["pytest_catchlog", "pytest_capturelog"]: + warnings.warn( + PytestWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return + ret = super(PytestPluginManager, self).register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name): + # support deprecated naming because plugins (xdist e.g.) use it + return self.get_plugin(name) + + def hasplugin(self, name): + """Return True if the plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config): + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) + self._configured = True + + # + # internal API for local conftest plugin handling + # + def _set_initial_conftests(self, namespace): + """ load initial conftest files given a preparsed "namespace". + As conftest files may add their own command line options + which have arguments ('--my-opt somepath') we might get some + false positives. All builtin and 3rd party plugins will have + been loaded, however, so common options will not confuse our logic + here. 
+ """ + current = py.path.local() + self._confcutdir = ( + current.join(namespace.confcutdir, abs=True) + if namespace.confcutdir + else None + ) + self._noconftest = namespace.noconftest + self._using_pyargs = namespace.pyargs + testpaths = namespace.file_or_dir + foundanchor = False + for path in testpaths: + path = str(path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if exists(anchor): # we found some file object + self._try_load_conftest(anchor) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current) + + def _try_load_conftest(self, anchor): + self._getconftestmodules(anchor) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x) + + @lru_cache(maxsize=128) + def _getconftestmodules(self, path): + if self._noconftest: + return [] + + if path.isfile(): + directory = path.dirpath() + else: + directory = path + + if six.PY2: # py2 is not using lru_cache. + try: + return self._dirpath2confmods[directory] + except KeyError: + pass + + # XXX these days we may rather want to use config.rootdir + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir + clist = [] + for parent in directory.realpath().parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + # Use realpath to avoid loading the same conftest twice + # with build systems that create build directories containing + # symlinks to actual files. + mod = self._importconftest(conftestpath.realpath()) + clist.append(mod) + self._dirpath2confmods[directory] = clist + return clist + + def _rget_with_confmod(self, name, path): + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest(self, conftestpath): + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + try: + mod = conftestpath.pyimport() + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + from _pytest.deprecated import ( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST, + ) + + fail( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST.format( + conftestpath, self._confcutdir + ), + pytrace=False, + ) + except Exception: + raise ConftestImportFailure(conftestpath, sys.exc_info()) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[conftestpath] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace("loaded conftestmodule %r" % (mod)) + self.consider_conftest(mod) + return mod + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse(self, args): + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, six.string_types): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + # PR #4304 : 
remove stepwise if cacheprovider is blocked + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. None indicates that it has been blocked. + # There is no interface with pluggy for this. + if self._name2plugin.get(name, -1) is None: + del self._name2plugin[name] + if not name.startswith("pytest_"): + if self._name2plugin.get("pytest_" + name, -1) is None: + del self._name2plugin["pytest_" + name] + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest(self, conftestmodule): + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self): + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod): + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs(self, spec): + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname, consider_entry_points=False): + """ + Imports a plugin with ``modname``. If ``consider_entry_points`` is True, entry point + names are also considered to find a plugin. + """ + # most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, six.string_types), ( + "module name as text required, got %r" % modname + ) + modname = str(modname) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + new_exc_message = 'Error importing plugin "%s": %s' % ( + modname, + safe_str(e.args[0]), + ) + new_exc = ImportError(new_exc_message) + tb = sys.exc_info()[2] + + six.reraise(ImportError, new_exc, tb) + + except Skipped as e: + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured( + PytestWarning("skipped plugin %r: %s" % (modname, e.msg)), + self.hook, + stacklevel=1, + ) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list(specs): + """ + Parses a list of "plugin specs" and returns a list of plugin names. + + Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in + which case it is returned as a list. Specs can also be `None` in which case an + empty list is returned. + """ + if specs is not None and not isinstance(specs, types.ModuleType): + if isinstance(specs, six.string_types): + specs = specs.split(",") if specs else [] + if not isinstance(specs, (list, tuple)): + raise UsageError( + "Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. 
Given: %r" % specs + ) + return list(specs) + return [] + + +def _ensure_removed_sysmodule(modname): + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset(object): + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files): + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + yield package_name + + +class Config(object): + """ access to configuration values, pluginmanager and plugin hooks. """ + + def __init__(self, pluginmanager): + #: access to command line option as attributes. + #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead + self.option = argparse.Namespace() + from .argparsing import Parser, FILE_OR_DIR + + _a = FILE_OR_DIR + self._parser = Parser( + usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), + processopt=self._processopt, + ) + #: a pluginmanager instance + self.pluginmanager = pluginmanager + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = self.pluginmanager.hook + self._inicache = {} + self._override_ini = () + self._opt2dest = {} + self._cleanup = [] + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.invocation_dir = py.path.local() + self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) + + def add_cleanup(self, func): + """ Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self): + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self): + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def get_terminal_writer(self): + return self.pluginmanager.get_plugin("terminalreporter")._tw + + def pytest_cmdline_parse(self, pluginmanager, args): + try: + self.parse(args) + except UsageError: + + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. 
+ if getattr(self.option, "version", False) or "--version" in args: + from _pytest.helpconfig import showversion + + showversion(self) + elif ( + getattr(self.option, "help", False) or "--help" in args or "-h" in args + ): + self._parser._getparser().print_help() + sys.stdout.write( + "\nNOTE: displaying only minimal help due to UsageError.\n\n" + ) + + raise + + return self + + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" % line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid): + # nodeid's are relative to the rootpath, compute relative to cwd + if self.invocation_dir != self.rootdir: + fullpath = self.rootdir.join(nodeid) + nodeid = self.invocation_dir.bestrelpath(fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args): + """ constructor useable for subprocesses. """ + config = get_config(args) + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt): + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default") and opt.dest: + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config): + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + + def _initini(self, args): + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + r = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self.rootdir, self.inifile, self.inicfg = r + self._parser.extra_info["rootdir"] = self.rootdir + self._parser.extra_info["inifile"] = self.inifile + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args): + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + _warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook): + """ + Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins. + """ + import pkg_resources + + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. 
+ return + + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + metadata_files = "RECORD", "SOURCES.txt" + + package_files = ( + entry.split(",")[0] + for entrypoint in pkg_resources.iter_entry_points("pytest11") + for metadata in metadata_files + for entry in entrypoint.dist._get_metadata(metadata) + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args, via): + """Validate known args.""" + self._parser._config_source_hint = via + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint + + return args + + def _preparse(self, args, addopts=True): + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + self._initini(args) + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + self.known_args_namespace = ns = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + if self.known_args_namespace.confcutdir is None and self.inifile: + confcutdir = py.path.local(self.inifile).dirname + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure: + e = sys.exc_info()[1] + if ns.help or ns.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured( + PytestWarning( + "could not load initial conftests: {}".format(e.path) + ), + self.hook, + stacklevel=2, + ) + else: + raise + + def _checkversion(self): + import pytest + from pkg_resources import parse_version + + minver = self.inicfg.get("minversion", None) + if minver: + if parse_version(minver) > parse_version(pytest.__version__): + raise pytest.UsageError( + "%s:%d: requires pytest-%s, actual pytest-%s'" + % ( + self.inicfg.config.path, + self.inicfg.lineof("minversion"), + minver, + pytest.__version__, + ) + ) + + def parse(self, args, addopts=True): + # parse given cmdline arguments into this config object. 
+        assert not hasattr(
+            self, "args"
+        ), "can only parse cmdline args at most once per Config object"
+        self._origargs = args
+        self.hook.pytest_addhooks.call_historic(
+            kwargs=dict(pluginmanager=self.pluginmanager)
+        )
+        self._preparse(args, addopts=addopts)
+        # XXX deprecated hook:
+        self.hook.pytest_cmdline_preparse(config=self, args=args)
+        self._parser.after_preparse = True
+        try:
+            args = self._parser.parse_setoption(
+                args, self.option, namespace=self.option
+            )
+            if not args:
+                if self.invocation_dir == self.rootdir:
+                    args = self.getini("testpaths")
+                if not args:
+                    args = [str(self.invocation_dir)]
+            self.args = args
+        except PrintHelp:
+            pass
+
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set, in which case the line becomes
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line)  # modifies the cached list inline
+
+    def getini(self, name):
+        """ return configuration value from an :ref:`ini file <inifiles>`. If the
+        specified name hasn't been registered through a prior
+        :py:func:`parser.addini <_pytest.config.Parser.addini>`
+        call (usually from a plugin), a ValueError is raised. """
+        try:
+            return self._inicache[name]
+        except KeyError:
+            self._inicache[name] = val = self._getini(name)
+            return val
+
+    def _getini(self, name):
+        try:
+            description, type, default = self._parser._inidict[name]
+        except KeyError:
+            raise ValueError("unknown configuration value: %r" % (name,))
+        value = self._get_override_ini_value(name)
+        if value is None:
+            try:
+                value = self.inicfg[name]
+            except KeyError:
+                if default is not None:
+                    return default
+                if type is None:
+                    return ""
+                return []
+        if type == "pathlist":
+            dp = py.path.local(self.inicfg.config.path).dirpath()
+            values = []
+            for relpath in shlex.split(value):
+                values.append(dp.join(relpath, abs=True))
+            return values
+        elif type == "args":
+            return shlex.split(value)
+        elif type == "linelist":
+            return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+        elif type == "bool":
+            return bool(_strtobool(value.strip()))
+        else:
+            assert type is None
+            return value
+
+    def _getconftest_pathlist(self, name, path):
+        try:
+            mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
+        except KeyError:
+            return None
+        modpath = py.path.local(mod.__file__).dirpath()
+        values = []
+        for relroot in relroots:
+            if not isinstance(relroot, py.path.local):
+                relroot = relroot.replace("/", py.path.local.sep)
+                relroot = modpath.join(relroot, abs=True)
+            values.append(relroot)
+        return values
+
+    def _get_override_ini_value(self, name):
+        value = None
+        # override_ini is a list of "ini=value" options
+        # always use the last item if multiple values are set for same ini-name,
+        # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
+        for ini_config in self._override_ini:
+            try:
+                key, user_ini_value = ini_config.split("=", 1)
+            except ValueError:
+                raise UsageError("-o/--override-ini expects option=value style.")
+            else:
+                if key == name:
+                    value = user_ini_value
+        return value
+
+    def getoption(self, name, default=notset, skip=False):
+        """ return command line option value.
+
+        :arg name: name of the option. You may also specify
+            the literal ``--OPT`` option instead of the "dest" option name.
+        :arg default: default value if no option of that name exists.
+        :arg skip: if True raise pytest.skip if option does not exist
+            or has a None value.
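+
+        Illustrative usage (option names are examples)::
+
+            verbose = config.getoption("verbose")
+            capture = config.getoption("--capture", default="fd")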
+ """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip("no %r option found" % (name,)) + raise ValueError("no option named %r" % (name,)) + + def getvalue(self, name, path=None): + """ (deprecated, use getoption()) """ + return self.getoption(name) + + def getvalueorskip(self, name, path=None): + """ (deprecated, use getoption(skip=True)) """ + return self.getoption(name, skip=True) + + +def _assertion_supported(): + try: + assert False + except AssertionError: + return True + else: + return False + + +def _warn_about_missing_assertion(mode): + if not _assertion_supported(): + if mode == "plain": + sys.stderr.write( + "WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" + ) + else: + sys.stderr.write( + "WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + + +def setns(obj, dic): + import pytest + + for name, value in dic.items(): + if isinstance(value, dict): + mod = getattr(obj, name, None) + if mod is None: + modname = "pytest.%s" % name + mod = types.ModuleType(modname) + sys.modules[modname] = mod + mod.__all__ = [] + setattr(obj, name, mod) + obj.__all__.append(name) + setns(mod, value) + else: + setattr(obj, name, value) + obj.__all__.append(name) + # if obj != pytest: + # pytest.__all__.append(name) + setattr(pytest, name, value) + + +def create_terminal_writer(config, *args, **kwargs): + """Create a TerminalWriter instance configured according to the options + in the config object. Every code which requires a TerminalWriter object + and has access to a config object should use this function. + """ + tw = py.io.TerminalWriter(*args, **kwargs) + if config.option.color == "yes": + tw.hasmarkup = True + if config.option.color == "no": + tw.hasmarkup = False + return tw + + +def _strtobool(val): + """Convert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: copied from distutils.util + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif val in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError("invalid truth value %r" % (val,)) diff --git a/env_27/lib/python2.7/site-packages/_pytest/config/argparsing.py b/env_27/lib/python2.7/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000..cc48ed3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,409 @@ +import argparse +import warnings + +import py +import six + +from _pytest.config.exceptions import UsageError + +FILE_OR_DIR = "file_or_dir" + + +class Parser(object): + """ Parser for command line arguments and ini-file values. + + :ivar extra_info: dict of generic param -> value to display in case + there's an error processing the command line arguments. 
+ """ + + prog = None + + def __init__(self, usage=None, processopt=None): + self._anonymous = OptionGroup("custom options", parser=self) + self._groups = [] + self._processopt = processopt + self._usage = usage + self._inidict = {} + self._ininames = [] + self.extra_info = {} + + def processoption(self, option): + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup(self, name, description="", after=None): + """ get (or create) a named option Group. + + :name: name of the option group. + :description: long description for --help output. + :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts, **attrs): + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. + """ + self._anonymous.addoption(*opts, **attrs) + + def parse(self, args, namespace=None): + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return self.optparser.parse_args(args, namespace=namespace) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter + return optparser + + def parse_setoption(self, args, option, namespace=None): + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments at this + point. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments, and + the remaining arguments unknown at this point. + """ + optparser = self._getparser() + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return optparser.parse_known_args(args, namespace=namespace) + + def addini(self, name, help, type=None, default=None): + """ register an ini-file option. 
+ + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` + or ``bool``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + """ + assert type in (None, "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument(object): + """class that mimics the necessary behaviour of optparse.Option + + it's currently a least effort implementation + and ignoring choices and integer prefixes + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get("dest") + if "%default" in (attrs.get("help") or ""): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3, + ) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, six.string_types): + if typ == "choice": + warnings.warn( + "`type` argument to addoption() is the string %r." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + "`type` argument to addoption() is the string %r, " + " but when supplied should be a type (for example `str` or `int`)." 
+ " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + attrs["type"] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs["type"] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError("need a long or short option", self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = "default dest help".split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self): + args = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup(object): + def __init__(self, name, description="", parser=None): + self.name = name + self.description = description + self.options = [] + self.parser = parser + + def addoption(self, *optnames, **attrs): + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. 
addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames, **attrs): + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option, shortupper=False): + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__(self, parser, extra_info=None, prog=None): + if not extra_info: + extra_info = {} + self._parser = parser + argparse.ArgumentParser.__init__( + self, + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user + self.extra_info = extra_info + + def error(self, message): + """Transform argparse error message into UsageError.""" + msg = "%s: error: %s" % (self.prog, message) + + if hasattr(self._parser, "_config_source_hint"): + msg = "%s (%s)" % (msg, self._parser._config_source_hint) + + raise UsageError(self.format_usage() + msg) + + def parse_args(self, args=None, namespace=None): + """allow splitting of positional arguments""" + args, argv = self.parse_known_args(args, namespace) + if argv: + for arg in argv: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(argv))] + for k, v in sorted(self.extra_info.items()): + lines.append(" %s: %s" % (k, v)) + self.error("\n".join(lines)) + getattr(args, FILE_OR_DIR).extend(argv) + return args + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """shorten help for long options that differ only in extra hyphens + + - collapse **long** options that are the same except for extra hyphens + - special action attribute map_long_option allows surpressing additional + long options + - shortcut if there are only two options and one of them is a short one + - cache result on action object as this is called at least 2 times + """ + + def _format_action_invocation(self, action): + orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr + return orgstr + return_list = [] + option_map = getattr(action, "map_long_option", {}) + if option_map is None: + option_map = {} + short_long = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), self + ) + xxoption = option[2:] + if xxoption.split()[0] not in option_map: + shortened = xxoption.replace("-", "") + if shortened not in short_long or 
len(short_long[shortened]) < len(
+                    xxoption
+                ):
+                    short_long[shortened] = xxoption
+        # now short_long has been filled out to the longest with dashes
+        # **and** we keep the right option ordering from add_argument
+        for option in options:
+            if len(option) == 2 or option[2] == " ":
+                return_list.append(option)
+            if option[2:] == short_long.get(option.replace("-", "")):
+                return_list.append(option.replace(" ", "=", 1))
+        action._formatted_action_invocation = ", ".join(return_list)
+        return action._formatted_action_invocation
diff --git a/env_27/lib/python2.7/site-packages/_pytest/config/exceptions.py b/env_27/lib/python2.7/site-packages/_pytest/config/exceptions.py
new file mode 100644
index 0000000..19fe5cb
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/_pytest/config/exceptions.py
@@ -0,0 +1,9 @@
+class UsageError(Exception):
+    """ error in pytest usage or invocation"""
+
+
+class PrintHelp(Exception):
+    """Raised when pytest should print its help to skip the rest of the
+    argument parsing and validation."""
+
+    pass
diff --git a/env_27/lib/python2.7/site-packages/_pytest/config/findpaths.py b/env_27/lib/python2.7/site-packages/_pytest/config/findpaths.py
new file mode 100644
index 0000000..fa20244
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/_pytest/config/findpaths.py
@@ -0,0 +1,148 @@
+import os
+
+import py
+
+from .exceptions import UsageError
+from _pytest.outcomes import fail
+
+
+def exists(path, ignore=EnvironmentError):
+    try:
+        return path.check()
+    except ignore:
+        return False
+
+
+def getcfg(args, config=None):
+    """
+    Search the list of arguments for a valid ini-file for pytest,
+    and return a tuple of (rootdir, inifile, cfg-dict).
+
+    note: config is optional and used only to issue warnings explicitly (#2891).
+    """
+    from _pytest.deprecated import CFG_PYTEST_SECTION
+
+    inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
+    args = [x for x in args if not str(x).startswith("-")]
+    if not args:
+        args = [py.path.local()]
+    for arg in args:
+        arg = py.path.local(arg)
+        for base in arg.parts(reverse=True):
+            for inibasename in inibasenames:
+                p = base.join(inibasename)
+                if exists(p):
+                    iniconfig = py.iniconfig.IniConfig(p)
+                    if (
+                        inibasename == "setup.cfg"
+                        and "tool:pytest" in iniconfig.sections
+                    ):
+                        return base, p, iniconfig["tool:pytest"]
+                    elif "pytest" in iniconfig.sections:
+                        if inibasename == "setup.cfg" and config is not None:
+
+                            fail(
+                                CFG_PYTEST_SECTION.format(filename=inibasename),
+                                pytrace=False,
+                            )
+                        return base, p, iniconfig["pytest"]
+                    elif inibasename == "pytest.ini":
+                        # allowed to be empty
+                        return base, p, {}
+    return None, None, None
+
+
+def get_common_ancestor(paths):
+    common_ancestor = None
+    for path in paths:
+        if not path.exists():
+            continue
+        if common_ancestor is None:
+            common_ancestor = path
+        else:
+            if path.relto(common_ancestor) or path == common_ancestor:
+                continue
+            elif common_ancestor.relto(path):
+                common_ancestor = path
+            else:
+                shared = path.common(common_ancestor)
+                if shared is not None:
+                    common_ancestor = shared
+    if common_ancestor is None:
+        common_ancestor = py.path.local()
+    elif common_ancestor.isfile():
+        common_ancestor = common_ancestor.dirpath()
+    return common_ancestor
+
+
+def get_dirs_from_args(args):
+    def is_option(x):
+        return str(x).startswith("-")
+
+    def get_file_part_from_node_id(x):
+        return str(x).split("::")[0]
+
+    def get_dir_from_path(path):
+        if path.isdir():
+            return path
+        return py.path.local(path.dirname)
+
+    # These look like paths but may not exist
+    possible_paths = (
+ py.path.local(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if path.exists()] + + +def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): + dirs = get_dirs_from_args(args) + if inifile: + iniconfig = py.iniconfig.IniConfig(inifile) + is_cfg_file = str(inifile).endswith(".cfg") + sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] + for section in sections: + try: + inicfg = iniconfig[section] + if is_cfg_file and section == "pytest" and config is not None: + from _pytest.deprecated import CFG_PYTEST_SECTION + + fail( + CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False + ) + break + except KeyError: + inicfg = None + if rootdir_cmd_arg is None: + rootdir = get_common_ancestor(dirs) + else: + ancestor = get_common_ancestor(dirs) + rootdir, inifile, inicfg = getcfg([ancestor], config=config) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in ancestor.parts(reverse=True): + if possible_rootdir.join("setup.py").exists(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inifile, inicfg = getcfg(dirs, config=config) + if rootdir is None: + if config is not None: + cwd = config.invocation_dir + else: + cwd = py.path.local() + rootdir = get_common_ancestor([cwd, ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" + if is_fs_root: + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = py.path.local(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.isdir(): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir + ) + ) + return rootdir, inifile, inicfg or {} diff --git a/env_27/lib/python2.7/site-packages/_pytest/debugging.py b/env_27/lib/python2.7/site-packages/_pytest/debugging.py new file mode 100644 index 0000000..cb1c964 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/debugging.py @@ -0,0 +1,308 @@ +""" interactive debugging with PDB, the Python Debugger. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import pdb +import sys +from doctest import UnexpectedException + +from _pytest import outcomes +from _pytest.config import hookimpl + + +def _validate_usepdb_cls(value): + try: + modname, classname = value.split(":") + except ValueError: + raise argparse.ArgumentTypeError( + "{!r} is not in the format 'modname:classname'".format(value) + ) + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + + return pdb_cls + except Exception as exc: + raise argparse.ArgumentTypeError( + "could not get pdb class for {!r}: {}".format(value, exc) + ) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="start a custom interactive Python debugger on errors. 
" + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test.", + ) + + +def pytest_configure(config): + pdb_cls = config.getvalue("usepdb_cls") + if not pdb_cls: + pdb_cls = pdb.Pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin(): + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + pytestPDB._pdb_cls, + ) = pytestPDB._saved.pop() + + config._cleanup.append(fin) + + +class pytestPDB(object): + """ Pseudo PDB that defers to the real pdb. """ + + _pluginmanager = None + _config = None + _pdb_cls = pdb.Pdb + _saved = [] + _recursive_debug = 0 + + @classmethod + def _is_capturing(cls, capman): + if capman: + return capman.is_capturing() + return False + + @classmethod + def _init_pdb(cls, *args, **kwargs): + """ Initialize PDB debugging, dropping any IO capturing. """ + import _pytest.config + + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. + header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB set_trace (IO-capturing turned off)") + else: + tw.sep( + ">", + "PDB set_trace (IO-capturing turned off for %s)" + % capturing, + ) + else: + tw.sep(">", "PDB set_trace") + + class _PdbWrapper(cls._pdb_cls, object): + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super(_PdbWrapper, self).do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super(_PdbWrapper, self).do_continue(arg) + if cls._recursive_debug == 0: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + "PDB continue (IO-capturing resumed for %s)" + % capturing, + ) + capman.resume() + else: + tw.sep(">", "PDB continue") + cls._pluginmanager.hook.pytest_leave_pdb( + config=cls._config, pdb=self + ) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def set_quit(self): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + super(_PdbWrapper, self).set_quit() + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. 
+ """ + ret = super(_PdbWrapper, self).setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + _pdb = _PdbWrapper(**kwargs) + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + else: + _pdb = cls._pdb_cls(**kwargs) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs): + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb(*args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke(object): + def pytest_exception_interact(self, node, call, report): + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excrepr, excinfo): + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace(object): + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem): + _test_pytest_function(pyfuncitem) + yield + + +def _test_pytest_function(pyfuncitem): + _pdb = pytestPDB._init_pdb() + testfunction = pyfuncitem.obj + pyfuncitem.obj = _pdb.runcall + if "func" in pyfuncitem._fixtureinfo.argnames: # noqa + raise ValueError("--trace can't be used with a fixture named func!") + pyfuncitem.funcargs["func"] = testfunction + new_list = list(pyfuncitem._fixtureinfo.argnames) + new_list.append("func") + pyfuncitem._fixtureinfo.argnames = tuple(new_list) + + +def _enter_pdb(node, excinfo, rep): + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. + tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo): + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. 
+ # Use the underlying exception instead: + return excinfo.value.exc_info[2] + else: + return excinfo._excinfo[2] + + +def _find_last_non_hidden_frame(stack): + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return i + + +def post_mortem(t): + class Pdb(pytestPDB._pdb_cls, object): + def get_stack(self, f, t): + stack, i = super(Pdb, self).get_stack(f, t) + if f is None: + i = _find_last_non_hidden_frame(stack) + return stack, i + + p = Pdb() + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/env_27/lib/python2.7/site-packages/_pytest/deprecated.py b/env_27/lib/python2.7/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..6134ca7 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/deprecated.py @@ -0,0 +1,95 @@ +""" +This module contains deprecation messages and bits of code used elsewhere in the codebase +that is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either PytestWarning instances or UnformattedWarning +in case of warnings which need to format their messages. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import RemovedInPytest4Warning +from _pytest.warning_types import UnformattedWarning + +YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" + + +FIXTURE_FUNCTION_CALL = ( + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code." +) + +FIXTURE_NAMED_REQUEST = PytestDeprecationWarning( + "'request' is a reserved name for fixtures and will raise an error in future versions" +) + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." + +GETFUNCARGVALUE = RemovedInPytest4Warning( + "getfuncargvalue is deprecated, use getfixturevalue" +) + +RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning( + "The 'message' parameter is deprecated.\n" + "(did you mean to use `match='some regex'` to check the exception message?)\n" + "Please comment on https://github.com/pytest-dev/pytest/issues/3974 " + "if you have concerns about removal of this parameter." +) + +RESULT_LOG = PytestDeprecationWarning( + "--result-log is deprecated and scheduled for removal in pytest 5.0.\n" + "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information." 
+) + +MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( + "MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n" + "Please use node.get_closest_marker(name) or node.iter_markers(name).\n" + "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code" +) + +RAISES_EXEC = PytestDeprecationWarning( + "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n" + "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" +) +WARNS_EXEC = PytestDeprecationWarning( + "warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n" + "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" +) + +PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported " + "because it affects the entire directory tree in a non-explicit way.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" +) + +PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning( + "the `pytest.config` global is deprecated. Please use `request.config` " + "or `pytest_configure` (if you're a pytest plugin) instead." +) + +PYTEST_ENSURETEMP = RemovedInPytest4Warning( + "pytest/tmpdir_factory.ensuretemp is deprecated, \n" + "please use the tmp_path fixture or tmp_path_factory.mktemp" +) + +PYTEST_LOGWARNING = PytestDeprecationWarning( + "pytest_logwarning is deprecated, no longer being called, and will be removed soon\n" + "please use pytest_warning_captured instead" +) + +PYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning( + PytestDeprecationWarning, + "pytest.warns() got unexpected keyword arguments: {args!r}.\n" + "This will be an error in future versions.", +) diff --git a/env_27/lib/python2.7/site-packages/_pytest/doctest.py b/env_27/lib/python2.7/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..34b6c9d --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/doctest.py @@ -0,0 +1,572 @@ +""" discover and run doctests in modules and test files.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import inspect +import platform +import sys +import traceback +from contextlib import contextmanager + +import pytest +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest.compat import safe_getattr +from _pytest.fixtures import FixtureRequest +from _pytest.outcomes import Skipped + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None + + +def pytest_addoption(parser): + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + 
"--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules and not _is_setup_py(config, path, parent): + return DoctestModule(path, parent) + elif _is_doctest(config, path, parent): + return DoctestTextfile(path, parent) + + +def _is_setup_py(config, path, parent): + if path.basename != "setup.py": + return False + contents = path.read() + return "setuptools" in contents or "distutils" in contents + + +def _is_doctest(config, path, parent): + if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + for glob in globs: + if path.check(fnmatch=glob): + return True + return False + + +class ReprFailDoctest(TerminalRepr): + def __init__(self, reprlocation_lines): + # List of (reprlocation, lines) tuples + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw): + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures): + super(MultipleDoctestFailures, self).__init__() + self.failures = failures + + +def _init_runner_class(): + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """ + Runner to collect failures. 
Note that the out variable in this case is + a list instead of a stdout-like object + """ + + def __init__( + self, checker=None, verbose=None, optionflags=0, continue_on_failure=True + ): + doctest.DebugRunner.__init__( + self, checker=checker, verbose=verbose, optionflags=optionflags + ) + self.continue_on_failure = continue_on_failure + + def report_failure(self, out, test, example, got): + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception(self, out, test, example, exc_info): + if isinstance(exc_info[1], Skipped): + raise exc_info[1] + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + return RUNNER_CLASS( + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(pytest.Item): + def __init__(self, name, parent, runner=None, dtest=None): + super(DoctestItem, self).__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request = None + + def setup(self): + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self): + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures = [] + self.runner.run(self.dtest, out=failures) + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self): + """ + Disable output capturing. 
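The runner above repurposes doctest's out parameter as a list so a single doctest can report every failing example instead of stopping at the first. A standalone sketch of the same idea using only the stdlib doctest module (the sample doctest text is illustrative):

import doctest

class CollectingRunner(doctest.DebugRunner):
    def report_failure(self, out, test, example, got):
        # Append instead of raising, so several failed examples
        # from one doctest are gathered in a single run.
        out.append(doctest.DocTestFailure(test, example, got))

parser = doctest.DocTestParser()
test = parser.get_doctest(">>> 1 + 1\n3\n", {}, "sample", "<sketch>", 0)
failures = []
CollectingRunner(verbose=False).run(test, out=failures)
assert len(failures) == 1  # collected, not raised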
Otherwise, stdout is lost to doctest (#985) + """ + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + def repr_failure(self, excinfo): + import doctest + + failures = None + if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): + failures = [excinfo.value] + elif excinfo.errisinstance(MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is not None: + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = _get_checker() + report_choice = _get_report_choice( + self.config.getoption("doctestreport") + ) + if lineno is not None: + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + lines = [ + "%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append("??? %s %s" % (indent, line)) + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += traceback.format_exception(*failure.exc_info) + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + else: + return super(DoctestItem, self).repr_failure(excinfo) + + def reportinfo(self): + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup(): + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self): + import doctest + + # inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) + filename = str(self.fspath) + name = self.fspath.basename + globs = 
{"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + _fix_spoof_python2(runner, encoding) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem(test.name, self, runner, test) + + +def _check_all_skipped(test): + """raises pytest.skip() if all examples in the given DocTest have the SKIP + option set. + """ + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj): + """ + returns if a object is possibly a mock object by checking the existence of a highly improbable attribute + """ + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware(): + """ + contextmanager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse on them + """ + real_unwrap = getattr(inspect, "unwrap", None) + if real_unwrap is None: + yield + else: + + def _mock_aware_unwrap(obj, stop=None): + if stop is None: + return real_unwrap(obj, stop=_is_mocked) + else: + return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj)) + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(pytest.Module): + def collect(self): + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + """ + a hackish doctest finder that overrides stdlib internals to fix a stdlib bug + + https://github.com/pytest-dev/pytest/issues/3456 + https://bugs.python.org/issue25532 + """ + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + if _is_mocked(obj): + return + with _patch_unwrap_mock_aware(): + + doctest.DocTestFinder._find( + self, tests, obj, name, module, source_lines, globs, seen + ) + + if self.fspath.basename == "conftest.py": + module = self.config.pluginmanager._importconftest(self.fspath) + else: + try: + module = self.fspath.pyimport() + except ImportError: + if self.config.getvalue("doctest_ignore_import_errors"): + pytest.skip("unable to import module %r" % self.fspath) + else: + raise + # uses internal doctest module parsing mechanism + finder = MockAwareDocTestFinder() + optionflags = get_optionflags(self) + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem(test.name, self, runner, test) + + +def _setup_fixtures(doctest_item): + """ + Used by DoctestTextfile and DoctestItem to setup fixture information. + """ + + def func(): + pass + + doctest_item.funcargs = {} + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo( + node=doctest_item, func=func, cls=None, funcargs=False + ) + fixture_request = FixtureRequest(doctest_item) + fixture_request._fillfixtures() + return fixture_request + + +def _get_checker(): + """ + Returns a doctest.OutputChecker subclass that takes in account the + ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES + to strip b'' prefixes. + Useful when the same doctest should run in Python 2 and Python 3. 
+ + An inner class is used to avoid importing "doctest" at the module + level. + """ + if hasattr(_get_checker, "LiteralsOutputChecker"): + return _get_checker.LiteralsOutputChecker() + + import doctest + import re + + class LiteralsOutputChecker(doctest.OutputChecker): + """ + Copied from doctest_nose_plugin.py from the nltk project: + https://github.com/nltk/nltk + + Further extended to also support byte literals. + """ + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + + def check_output(self, want, got, optionflags): + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + if res: + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + if not allow_unicode and not allow_bytes: + return False + + else: # pragma: no cover + + def remove_prefixes(regex, txt): + return re.sub(regex, r"\1\2", txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + return res + + _get_checker.LiteralsOutputChecker = LiteralsOutputChecker + return _get_checker.LiteralsOutputChecker() + + +def _get_allow_unicode_flag(): + """ + Registers and returns the ALLOW_UNICODE flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_UNICODE") + + +def _get_allow_bytes_flag(): + """ + Registers and returns the ALLOW_BYTES flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_BYTES") + + +def _get_report_choice(key): + """ + This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid + importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +def _fix_spoof_python2(runner, encoding): + """ + Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This + should patch only doctests for text files because they don't have a way to declare their + encoding. Doctests in docstrings from Python modules don't have the same problem given that + Python already decoded the strings. + + This fixes the problem related in issue #2434. + """ + from _pytest.compat import _PY2 + + if not _PY2: + return + + from doctest import _SpoofOut + + class UnicodeSpoof(_SpoofOut): + def getvalue(self): + result = _SpoofOut.getvalue(self) + if encoding and isinstance(result, bytes): + result = result.decode(encoding) + return result + + runner._fakeout = UnicodeSpoof() + + +@pytest.fixture(scope="session") +def doctest_namespace(): + """ + Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. 
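For reference, the usual way to feed the doctest_namespace fixture defined here is an autouse fixture in a conftest.py, as in the pytest documentation (numpy is just the conventional example; any object can be injected):

# conftest.py
import numpy
import pytest

@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Every collected doctest may now use "np" without importing it.
    doctest_namespace["np"] = numpy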
+ """ + return dict() diff --git a/env_27/lib/python2.7/site-packages/_pytest/fixtures.py b/env_27/lib/python2.7/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..0bb525a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/fixtures.py @@ -0,0 +1,1321 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import inspect +import itertools +import sys +import warnings +from collections import defaultdict +from collections import deque +from collections import OrderedDict + +import attr +import py +import six + +import _pytest +from _pytest import nodes +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import _format_args +from _pytest.compat import _PytestWrapper +from _pytest.compat import exc_clear +from _pytest.compat import FuncargnamesCompatAttr +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfslineno +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import isclass +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.deprecated import FIXTURE_FUNCTION_CALL +from _pytest.deprecated import FIXTURE_NAMED_REQUEST +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME + + +@attr.s(frozen=True) +class PseudoFixtureDef(object): + cached_result = attr.ib() + scope = attr.ib() + + +def pytest_sessionstart(session): + import _pytest.python + import _pytest.nodes + + scopename2class.update( + { + "package": _pytest.python.Package, + "class": _pytest.python.Class, + "module": _pytest.python.Module, + "function": _pytest.nodes.Item, + "session": _pytest.main.Session, + } + ) + session._fixturemanager = FixtureManager(session) + + +scopename2class = {} + + +scope2props = dict(session=()) +scope2props["package"] = ("fspath",) +scope2props["module"] = ("fspath", "module") +scope2props["class"] = scope2props["module"] + ("cls",) +scope2props["instance"] = scope2props["class"] + ("instance",) +scope2props["function"] = scope2props["instance"] + ("function", "keywords") + + +def scopeproperty(name=None, doc=None): + def decoratescope(func): + scopename = name or func.__name__ + + def provide(self): + if func.__name__ in scope2props[self.scope]: + return func(self) + raise AttributeError( + "%s not available in %s-scoped context" % (scopename, self.scope) + ) + + return property(provide, None, None, func.__doc__) + + return decoratescope + + +def get_scope_package(node, fixturedef): + import pytest + + cls = pytest.Package + current = node + fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py") + while current and ( + type(current) is not cls or fixture_package_name != current.nodeid + ): + current = current.parent + if current is None: + return node.session + return current + + +def get_scope_node(node, scope): + cls = scopename2class.get(scope) + if cls is None: + raise ValueError("unknown scope") + return node.getparent(cls) + + +def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): + # this function will transform all collected calls to a functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. 
+ # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. + if not metafunc._calls[0].funcargs: + return # this function call does not have direct parametrization + # collect funcargs of all callspecs into a list of values + arg2params = {} + arg2scope = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # if we have a scope that is higher than function we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, _pytest.python.Module) + # use module-level collector for class-scope (for now) + node = collector + if node and argname in node._name2pseudofixturedef: + arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager, + "", + argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, + False, + False, + ) + arg2fixturedefs[argname] = [fixturedef] + if node is not None: + node._name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj): + """ return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + return getattr(obj, "_pytestfixturefunction", None) + except TEST_OUTCOME: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + + +def get_parametrized_fixture_keys(item, scopenum): + """ return list of keys for all parametrized arguments which match + the specified scope. """ + assert scopenum < scopenum_function # function + try: + cs = item.callspec + except AttributeError: + pass + else: + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
+ for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key = (argname, param_index) + elif scopenum == 1: # package + key = (argname, param_index, item.fspath.dirpath()) + elif scopenum == 2: # module + key = (argname, param_index, item.fspath) + elif scopenum == 3: # class + key = (argname, param_index, item.fspath, item.cls) + yield key + + +# algorithm for sorting on a per-parametrized resource setup basis +# it is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns + + +def reorder_items(items): + argkeys_cache = {} + items_by_argkey = {} + for scopenum in range(0, scopenum_function): + argkeys_cache[scopenum] = d = {} + items_by_argkey[scopenum] = item_d = defaultdict(deque) + for item in items: + keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items = OrderedDict.fromkeys(items) + return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0)) + + +def fix_cache_order(item, argkeys_cache, items_by_argkey): + for scopenum in range(0, scopenum_function): + for key in argkeys_cache[scopenum].get(item, []): + items_by_argkey[scopenum][key].appendleft(item) + + +def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): + if scopenum >= scopenum_function or len(items) < 3: + return items + ignore = set() + items_deque = deque(items) + items_done = OrderedDict() + scoped_items_by_argkey = items_by_argkey[scopenum] + scoped_argkeys_cache = argkeys_cache[scopenum] + while items_deque: + no_argkey_group = OrderedDict() + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = OrderedDict.fromkeys( + k for k in scoped_argkeys_cache.get(item, []) if k not in ignore + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # we don't have to remove relevant items from later in the deque because they'll just be ignored + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def fillfixtures(function): + """ fill missing funcargs for a test function. """ + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. 
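The effect of the reordering described above is easiest to observe from the outside: under a parametrized higher-scope fixture, tests are grouped by parameter so each parameter is set up and torn down exactly once. An illustrative test module:

import pytest

@pytest.fixture(scope="module", params=["a", "b"])
def backend(request):
    print("setup", request.param)
    yield request.param
    print("teardown", request.param)

def test_one(backend):
    pass

def test_two(backend):
    pass

# Observed order: setup a, test_one[a], test_two[a], teardown a,
# then setup b, test_one[b], test_two[b], teardown b.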
+ fm = function.session._fixturemanager + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function) + request._fillfixtures() + # prune out funcargs for jstests + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + +def get_direct_param_fixture_func(request): + return request.param + + +@attr.s(slots=True) +class FuncFixtureInfo(object): + # original function argument names + argnames = attr.ib(type=tuple) + # argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames = attr.ib(type=tuple) + names_closure = attr.ib() # List[str] + name2fixturedefs = attr.ib() # List[str, List[FixtureDef]] + + def prune_dependency_tree(self): + """Recompute names_closure from initialnames and name2fixturedefs + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest(FuncargnamesCompatAttr): + """ A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. 
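The optional param attribute mentioned in the docstring above is what a parametrized fixture reads; a minimal example of the documented pattern:

import pytest

@pytest.fixture(params=[1, 2, 3])
def number(request):
    return request.param  # one fixture invocation per parameter

def test_number(number):
    assert number in (1, 2, 3)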
+ """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + #: fixture for which this request is being performed + self.fixturename = None + #: Scope string, one of "function", "class", "module", "session" + self.scope = "function" + self._fixture_defs = {} # argname -> FixtureDef + fixtureinfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index = {} + self._fixturemanager = pyfuncitem.session._fixturemanager + + @property + def fixturenames(self): + """names of all active fixtures in this request""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """ underlying collection node (depends on current request scope)""" + return self._getscopeitem(self.scope) + + def _getnextfixturedef(self, argname): + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # we arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + self._arg2fixturedefs[argname] = fixturedefs + # fixturedefs list is immutable so we maintain a decreasing index + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self): + """ the pytest config object associated with this request. """ + return self._pyfuncitem.config + + @scopeproperty() + def function(self): + """ test function object if the request has a per-function scope. """ + return self._pyfuncitem.obj + + @scopeproperty("class") + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + # unittest support hack, see _pytest.unittest.TestCaseFunction + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @scopeproperty() + def module(self): + """ python module object where the test function was collected. """ + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @scopeproperty() + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + @property + def keywords(self): + """ keywords/markers dictionary for the underlying node. """ + return self.node.keywords + + @property + def session(self): + """ pytest session object. """ + return self._pyfuncitem.session + + def addfinalizer(self, finalizer): + """ add finalizer/teardown function to be called after the + last test within the requesting test context finished + execution. """ + # XXX usually this method is shadowed by fixturedef specific ones + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem + ) + + def applymarker(self, marker): + """ Apply a marker to a single test function invocation. 
+ This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg): + """ raise a FixtureLookupError with the given message. """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self): + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def getfixturevalue(self, argname): + """ Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + """ + return self._get_active_fixturedef(argname).cached_result[0] + + def getfuncargvalue(self, argname): + """ Deprecated, use getfixturevalue. """ + from _pytest import deprecated + + warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) + return self.getfixturevalue(argname) + + def _get_active_fixturedef(self, argname): + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef(cached_result, scope) + raise + # remove indent to prevent the python3 exception + # from leaking into the call + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self): + current = self + values = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + values.reverse() + return values + values.append(fixturedef) + current = current._parent_request + + def _compute_fixture_value(self, fixturedef): + """ + Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will + force the FixtureDef object to throw away any previous results and compute a new fixture value, which + will be stored into the FixtureDef object itself. 
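getfixturevalue(), defined above, resolves a fixture by name at setup time; one sketch of the dynamic-lookup use case it enables (fixture names and values are illustrative):

import pytest

@pytest.fixture
def fast_db():
    return {"kind": "sqlite"}

@pytest.fixture
def real_db():
    return {"kind": "postgres"}

@pytest.fixture(params=["fast_db", "real_db"])
def db(request):
    # Look the backing fixture up by name, once per parameter.
    return request.getfixturevalue(request.param)

def test_db(db):
    assert "kind" in db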
+ + :param FixtureDef fixturedef: + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = frameinfo.filename + source_lineno = frameinfo.lineno + source_path = py.path.local(source_path) + if source_path.relto(funcitem.config.rootdir): + source_path = source_path.relto(funcitem.config.rootdir) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path, + source_lineno, + ) + ) + fail(msg, pytrace=False) + else: + param_index = funcitem.callspec.indices[argname] + # if a parametrize invocation set a scope it will override + # the static scope defined with the fixture function + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest(self, scope, param, param_index, fixturedef) + + # check if a higher-level scoped fixture accesses a lower level one + subrequest._check_scope(argname, self.scope, scope) + + # clear sys.exc_info before invoking the fixture (python bug?) 
+        # if it's not explicitly cleared it will leak into the call
+        exc_clear()
+        try:
+            # call the fixture function
+            fixturedef.execute(request=subrequest)
+        finally:
+            self._schedule_finalizers(fixturedef, subrequest)
+
+    def _schedule_finalizers(self, fixturedef, subrequest):
+        # if fixture function failed it might have registered finalizers
+        self.session._setupstate.addfinalizer(
+            functools.partial(fixturedef.finish, request=subrequest), subrequest.node
+        )
+
+    def _check_scope(self, argname, invoking_scope, requested_scope):
+        if argname == "request":
+            return
+        if scopemismatch(invoking_scope, requested_scope):
+            # try to report something helpful
+            lines = self._factorytraceback()
+            fail(
+                "ScopeMismatch: You tried to access the %r scoped "
+                "fixture %r with a %r scoped request object, "
+                "involved factories\n%s"
+                % ((requested_scope, argname, invoking_scope, "\n".join(lines))),
+                pytrace=False,
+            )
+
+    def _factorytraceback(self):
+        lines = []
+        for fixturedef in self._get_fixturestack():
+            factory = fixturedef.func
+            fs, lineno = getfslineno(factory)
+            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+            args = _format_args(factory)
+            lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
+        return lines
+
+    def _getscopeitem(self, scope):
+        if scope == "function":
+            # this might also be a non-function Item despite its attribute name
+            return self._pyfuncitem
+        if scope == "package":
+            node = get_scope_package(self._pyfuncitem, self._fixturedef)
+        else:
+            node = get_scope_node(self._pyfuncitem, scope)
+        if node is None and scope == "class":
+            # fallback to function item itself
+            node = self._pyfuncitem
+        assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
+            scope, self._pyfuncitem
+        )
+        return node
+
+    def __repr__(self):
+        return "<FixtureRequest for %r>" % (self.node)
+
+
+class SubRequest(FixtureRequest):
+    """ a sub request for handling getting a fixture from a
+    test function/fixture. """
+
+    def __init__(self, request, scope, param, param_index, fixturedef):
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self.scope = scope
+        self._fixturedef = fixturedef
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self):
+        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
+
+    def addfinalizer(self, finalizer):
+        self._fixturedef.addfinalizer(finalizer)
+
+    def _schedule_finalizers(self, fixturedef, subrequest):
+        # if the executing fixturedef was not explicitly requested in the argument list (via
+        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
+        # first
+        if fixturedef.argname not in self.funcargnames:
+            fixturedef.addfinalizer(
+                functools.partial(self._fixturedef.finish, request=self)
+            )
+        super(SubRequest, self)._schedule_finalizers(fixturedef, subrequest)
+
+
+scopes = "session package module class function".split()
+scopenum_function = scopes.index("function")
+
+
+def scopemismatch(currentscope, newscope):
+    return scopes.index(newscope) > scopes.index(currentscope)
+
+
+def scope2index(scope, descr, where=None):
+    """Look up the index of ``scope`` and raise a descriptive value error
+    if not defined.
+ """ + try: + return scopes.index(scope) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, "from {} ".format(where) if where else "", scope + ), + pytrace=False, + ) + + +class FixtureLookupError(LookupError): + """ could not return a requested Fixture (missing or invalid). """ + + def __init__(self, argname, request, msg=None): + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self): + tblines = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + if msg is not None: + # the last fixture raise an error, let's present + # it at the requesting side + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (IOError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno + 1)) + else: + addline("file %s, line %s" % (fspath, lineno + 1)) + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith("def"): + break + + if msg is None: + fm = self.request._fixturemanager + available = set() + parentid = self.request._pyfuncitem.parent.nodeid + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parentid)) + if faclist: + available.add(name) + if self.argname in available: + msg = " recursive dependency involving fixture '{}' detected".format( + self.argname + ) + else: + msg = "fixture '{}' not found".format(self.argname) + msg += "\n available fixtures: {}".format(", ".join(sorted(available))) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." 
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw): + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + red=True, + ) + for line in lines[1:]: + tw.line( + "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), + red=True, + ) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) + + +def fail_fixturefunc(fixturefunc, msg): + fs, lineno = getfslineno(fixturefunc) + location = "%s:%s" % (fs, lineno + 1) + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) + + +def call_fixture_func(fixturefunc, request, kwargs): + yieldctx = is_generator(fixturefunc) + if yieldctx: + it = fixturefunc(**kwargs) + res = next(it) + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) + request.addfinalizer(finalizer) + else: + res = fixturefunc(**kwargs) + return res + + +def _teardown_yield_fixture(fixturefunc, it): + """Executes the teardown of a fixture function by advancing the iterator after the + yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc( + fixturefunc, "yield_fixture function has more than one 'yield'" + ) + + +class FixtureDef(object): + """ A container for a factory definition. 
""" + + def __init__( + self, + fixturemanager, + baseid, + argname, + func, + scope, + params, + unittest=False, + ids=None, + ): + self._fixturemanager = fixturemanager + self.baseid = baseid or "" + self.has_location = baseid is not None + self.func = func + self.argname = argname + self.scope = scope + self.scopenum = scope2index( + scope or "function", + descr="Fixture '{}'".format(func.__name__), + where=baseid, + ) + self.params = params + self.argnames = getfuncargnames(func, is_method=unittest) + self.unittest = unittest + self.ids = ids + self._finalizers = [] + + def addfinalizer(self, finalizer): + self._finalizers.append(finalizer) + + def finish(self, request): + exceptions = [] + try: + while self._finalizers: + try: + func = self._finalizers.pop() + func() + except: # noqa + exceptions.append(sys.exc_info()) + if exceptions: + e = exceptions[0] + del exceptions # ensure we don't keep all frames alive because of the traceback + six.reraise(*e) + + finally: + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # even if finalization fails, we invalidate + # the cached fixture value and remove + # all finalizers because they may be bound methods which will + # keep instances alive + if hasattr(self, "cached_result"): + del self.cached_result + self._finalizers = [] + + def execute(self, request): + # get required arguments and register our own finish() + # with their finalization + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + fixturedef.addfinalizer(functools.partial(self.finish, request=request)) + + my_cache_key = request.param_index + cached_result = getattr(self, "cached_result", None) + if cached_result is not None: + result, cache_key, err = cached_result + if my_cache_key == cache_key: + if err is not None: + six.reraise(*err) + else: + return result + # we have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one + self.finish(request) + assert not hasattr(self, "cached_result") + + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + return hook.pytest_fixture_setup(fixturedef=self, request=request) + + def __repr__(self): + return "" % ( + self.argname, + self.scope, + self.baseid, + ) + + +def resolve_fixture_function(fixturedef, request): + """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific + instances and bound methods. + """ + fixturefunc = fixturedef.func + if fixturedef.unittest: + if request.instance is not None: + # bind the unbound method to the TestCase instance + fixturefunc = fixturedef.func.__get__(request.instance) + else: + # the fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + if request.instance is not None: + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) + return fixturefunc + + +def pytest_fixture_setup(fixturedef, request): + """ Execution of fixture setup. 
""" + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request.scope, fixdef.scope) + kwargs[argname] = result + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = request.param_index + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME: + fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def _ensure_immutable_ids(ids): + if ids is None: + return + if callable(ids): + return ids + return tuple(ids) + + +def wrap_function_to_error_out_if_called_directly(function, fixture_marker): + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function. + """ + message = FIXTURE_FUNCTION_CALL.format( + name=fixture_marker.name or function.__name__ + ) + + @six.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774) + result.__pytest_wrapped__ = _PytestWrapper(function) + + return result + + +@attr.s(frozen=True) +class FixtureFunctionMarker(object): + scope = attr.ib() + params = attr.ib(converter=attr.converters.optional(tuple)) + autouse = attr.ib(default=False) + ids = attr.ib(default=None, converter=_ensure_immutable_ids) + name = attr.ib(default=None) + + def __call__(self, function): + if isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + warnings.warn(FIXTURE_NAMED_REQUEST) + function._pytestfixturefunction = self + return function + + +def fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test + modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` + marker. + + Test functions can directly use fixture names as input + arguments in which case the fixture instance returned from the fixture + function will be injected. + + Fixtures can provide their values to test functions using ``return`` or ``yield`` + statements. When using ``yield`` the code block after the ``yield`` statement is executed + as teardown code regardless of the test outcome, and must yield exactly once. + + :arg scope: the scope for which this fixture is shared, one of + ``"function"`` (default), ``"class"``, ``"module"``, + ``"package"`` or ``"session"``. + + ``"package"`` is considered **experimental** at this time. + + :arg params: an optional list of parameters which will cause multiple + invocations of the fixture function and all of the tests + using it. + + :arg autouse: if True, the fixture func is activated for all tests that + can see it. If False (the default) then an explicit + reference is needed to activate the fixture. 
+
+    :arg ids: list of string ids each corresponding to the params
+        so that they are part of the test id. If no ids are provided
+        they will be generated automatically from the params.
+
+    :arg name: the name of the fixture. This defaults to the name of the
+        decorated function. If a fixture is used in the same module in
+        which it is defined, the function name of the fixture will be
+        shadowed by the function arg that requests the fixture; one way
+        to resolve this is to name the decorated function
+        ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    if callable(scope) and params is None and autouse is False:
+        # direct decoration
+        return FixtureFunctionMarker("function", params, autouse, name=name)(scope)
+    if params is not None and not isinstance(params, (list, tuple)):
+        params = list(params)
+    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
+
+
+def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
+    """ (return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name)
+
+
+defaultfuncargprefixmarker = fixture()
+
+
+@fixture(scope="session")
+def pytestconfig(request):
+    """Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
+
+    Example::
+
+        def test_foo(pytestconfig):
+            if pytestconfig.getoption("verbose") > 0:
+                ...
+
+    """
+    return request.config
+
+
+class FixtureManager(object):
+    """
+    pytest fixtures definitions and information is stored and managed
+    from this class.
+
+    During collection fm.parsefactories() is called multiple times to parse
+    fixture function definitions into FixtureDef objects and internal
+    data structures.
+
+    During collection of test functions, metafunc-mechanics instantiate
+    a FuncFixtureInfo object which is cached per node/func-name.
+    This FuncFixtureInfo object is later retrieved by Function nodes
+    which themselves offer a fixturenames attribute.
+
+    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+    relevant for a particular function. An initial list of fixtures is
+    assembled like this:
+
+    - ini-defined usefixtures
+    - autouse-marked fixtures along the collection chain up from the function
+    - usefixtures markers at module/class/function level
+    - test function funcargs
+
+    Subsequently the funcfixtureinfo.fixturenames attribute is computed
+    as the closure of the fixtures needed to setup the initial fixtures,
+    i. e. fixtures needed by fixture functions themselves are appended
+    to the fixturenames list.
+
+    Upon the test-setup phases all fixturenames are instantiated, retrieved
+    by a lookup of their FuncFixtureInfo.
+ """ + + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session): + self.session = session + self.config = session.config + self._arg2fixturedefs = {} + self._holderobjseen = set() + self._arg2finish = {} + self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo(self, node, func, cls, funcargs=True): + if funcargs and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, cls=cls) + else: + argnames = () + usefixtures = itertools.chain.from_iterable( + mark.args for mark in node.iter_markers(name="usefixtures") + ) + initialnames = tuple(usefixtures) + argnames + fm = node.session._fixturemanager + initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( + initialnames, node + ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin): + nodeid = None + try: + p = py.path.local(plugin.__file__).realpath() + except AttributeError: + pass + else: + # construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id) + if p.basename.startswith("conftest.py"): + nodeid = p.dirpath().relto(self.config.rootdir) + if p.sep != nodes.SEP: + nodeid = nodeid.replace(p.sep, nodes.SEP) + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid): + """ return a tuple of fixture names to be used. """ + autousenames = [] + for baseid, basenames in self._nodeid_and_autousenames: + if nodeid.startswith(baseid): + if baseid: + i = len(baseid) + nextchar = nodeid[i : i + 1] + if nextchar and nextchar not in ":/": + continue + autousenames.extend(basenames) + return autousenames + + def getfixtureclosure(self, fixturenames, parentnode): + # collect the closure of all fixtures , starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive) + + parentid = parentnode.nodeid + fixturenames_closure = self._getautousenames(parentid) + + def merge(otherlist): + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + + # at this point, fixturenames_closure contains what we call "initialnames", + # which is a set of fixturenames the function immediately requests. We + # need to return it as well, so save this. 
+ initialnames = tuple(fixturenames_closure) + + arg2fixturedefs = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name): + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return scopes.index("function") + else: + return fixturedefs[-1].scopenum + + fixturenames_closure.sort(key=sort_by_scope) + return initialnames, fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc): + for argname in metafunc.fixturenames: + faclist = metafunc._arg2fixturedefs.get(argname) + if faclist: + fixturedef = faclist[-1] + if fixturedef.params is not None: + markers = list(metafunc.definition.iter_markers("parametrize")) + for parametrize_mark in markers: + if "argnames" in parametrize_mark.kwargs: + argnames = parametrize_mark.kwargs["argnames"] + else: + argnames = parametrize_mark.args[0] + + if not isinstance(argnames, (tuple, list)): + argnames = [ + x.strip() for x in argnames.split(",") if x.strip() + ] + if argname in argnames: + break + else: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + else: + continue # will raise FixtureLookupError at setup time + + def pytest_collection_modifyitems(self, items): + # separate parametrized setups + items[:] = reorder_items(items) + + def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. + obj = safe_getattr(holderobj, name, None) + marker = getfixturemarker(obj) + if not isinstance(marker, FixtureFunctionMarker): + # magic globals with __getattr__ might have got us a wrong + # fixture attribute + continue + + if marker.name: + name = marker.name + + # during fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in order to not emit the warning + # when pytest itself calls the fixture function + if six.PY2 and unittest: + # hack on Python 2 because of the unbound methods + obj = get_real_func(obj) + else: + obj = get_real_method(obj, holderobj) + + fixture_def = FixtureDef( + self, + nodeid, + name, + obj, + marker.scope, + marker.params, + unittest=unittest, + ids=marker.ids, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_and_autousenames.append((nodeid or "", autousenames)) + + def getfixturedefs(self, argname, nodeid): + """ + Gets a list of fixtures which are applicable to the given node id. 
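+
+        Illustrative call (hypothetical arguments, not upstream
+        documentation)::
+
+            fm.getfixturedefs("tmp_path", "tests/test_io.py::test_read")
+            # -> tuple of visible FixtureDef objects, most specific last,
+            #    or None if no fixture of that name is known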
+ + :param str argname: name of the fixture to search for + :param str nodeid: full node id of the requesting test. + :return: list[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories(self, fixturedefs, nodeid): + for fixturedef in fixturedefs: + if nodes.ischildnode(fixturedef.baseid, nodeid): + yield fixturedef diff --git a/env_27/lib/python2.7/site-packages/_pytest/freeze_support.py b/env_27/lib/python2.7/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..5edf345 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/freeze_support.py @@ -0,0 +1,47 @@ +""" +Provides a function to report all internal modules for using freezing tools +pytest +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def freeze_includes(): + """ + Returns a list of module names used by pytest that should be + included by cx_freeze. + """ + import py + import _pytest + + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules(package, prefix=""): + """ + Iterates over the names of all modules that can be found in the given + package, recursively. + Example: + _iter_all_modules(_pytest) -> + ['_pytest.assertion.newinterpret', + '_pytest.capture', + '_pytest.core', + ... + ] + """ + import os + import pkgutil + + if type(package) is not str: + path, prefix = package.__path__[0], package.__name__ + "." + else: + path = package + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/env_27/lib/python2.7/site-packages/_pytest/helpconfig.py b/env_27/lib/python2.7/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..8117ee6 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/helpconfig.py @@ -0,0 +1,221 @@ +""" version info, help messages, tracing configuration. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +from argparse import Action + +import py + +import pytest +from _pytest.config import PrintHelp + + +class HelpAction(Action): + """This is an argparse Action that will raise an exception in + order to skip the rest of the argument parsing when --help is passed. + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. 
+ """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super(HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + action="store_true", + help="display pytest lib version and import information.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ), + group.addoption( + "--debug", + action="store_true", + dest="debug", + default=False, + help="store internal tracing debug information in 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config = outcome.get_result() + if config.option.debug: + path = os.path.abspath("pytestdebug.log") + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config._origargs, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytestdebug information to %s\n" % path) + + def unset_tracing(): + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def showversion(config): + p = py.path.local(pytest.__file__) + sys.stderr.write( + "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + + +def pytest_cmdline_main(config): + if config.option.version: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + + +def showhelp(config): + reporter = config.pluginmanager.get_plugin("terminalreporter") + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line() + tw.line( + "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" + ) + tw.line() + + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + spec = "%s (%s)" % (name, type) + line = " %-24s %s" % (spec, help) + tw.line(line[: tw.fullwidth]) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + 
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(" %-24s %s" % (name, help)) + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config): + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = "%s-%s at %s" % (dist.project_name, dist.version, loc) + lines.append(" " + content) + return lines + + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(" %-20s: %s" % (name, r)) + return lines diff --git a/env_27/lib/python2.7/site-packages/_pytest/hookspec.py b/env_27/lib/python2.7/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..5a3eb28 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/hookspec.py @@ -0,0 +1,635 @@ +""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ +from pluggy import HookspecMarker + +from _pytest.deprecated import PYTEST_LOGWARNING + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager): + """called at plugin registration time to allow adding new hooks via a call to + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered(plugin, manager): + """ a new pytest plugin got registered. + + :param plugin: the plugin module or instance + :param _pytest.config.PytestPluginManager manager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser): + """register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. 
+ + :arg _pytest.config.Parser parser: To add command line options, call + :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. + To add ini-file values call :py:func:`parser.addini(...) + <_pytest.config.Parser.addini>`. + + Options can later be accessed through the + :py:class:`config <_pytest.config.Config>` object, respectively: + + - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_configure(config): + """ + Allows plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :arg _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + This hook will only be called for plugin classes passed to the ``plugins`` arg when using `pytest.main`_ to + perform an in-process test run. + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + :param list[str] args: list of arguments passed on the command line + """ + + +def pytest_cmdline_preparse(config, args): + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :func:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config config: pytest config object + :param list[str] args: list of arguments passed on the command line + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config): + """ called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + """ + + +def pytest_load_initial_conftests(early_config, parser, args): + """ implements the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. 
+ + :param _pytest.config.Config early_config: pytest config object + :param list[str] args: list of arguments passed on the command line + :param _pytest.config.Parser parser: to add command line options + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session): + """Perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_collection_modifyitems(session, config, items): + """ called after collection has been performed, may filter or re-order + the items in-place. + + :param _pytest.main.Session session: the pytest session object + :param _pytest.config.Config config: pytest config object + :param List[_pytest.nodes.Item] items: list of item objects + """ + + +def pytest_collection_finish(session): + """ called after collection has been performed and modified. + + :param _pytest.main.Session session: the pytest session object + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect(path, config): + """ return True to prevent considering this path for collection. + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + :param _pytest.config.Config config: pytest config object + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path, parent): + """ called before traversing a directory for collection files. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + """ + + +def pytest_collect_file(path, parent): + """ return collection Node or None for the given path. Any new node + needs to have the specified ``parent`` as a parent. + + :param str path: the path to collect + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector): + """ collector starts collecting. """ + + +def pytest_itemcollected(item): + """ we just collected a test item. """ + + +def pytest_collectreport(report): + """ collector finished collecting. """ + + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector): + """ perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule(path, parent): + """ return a Module collector or None for the given path. + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. 
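+
+    Example (an illustrative hookwrapper sketch, not upstream
+    documentation; assumes ``time`` and ``pytest`` are imported)::
+
+        @pytest.hookimpl(hookwrapper=True)
+        def pytest_pyfunc_call(pyfuncitem):
+            start = time.time()
+            yield
+            print("%s took %.3fs" % (pyfuncitem.name, time.time() - start))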
+ + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config, val, argname): + """Return a user-friendly string representation of the given ``val`` that will be used + by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + :param val: the parametrized value + :param str argname: the automatic parameter name produced by pytest + """ + + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_itemstart(item, node): + """(**Deprecated**) use pytest_runtest_logstart. """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item, nextitem): + """ implements the runtest_setup/call/teardown protocol for + the given test item, including capturing exceptions and calling + reporting hooks. + + :arg item: test item for which the runtest protocol is performed. + + :arg nextitem: the scheduled-to-be-next test item (or None if this + is the end my friend). This argument is passed on to + :py:func:`pytest_runtest_teardown`. + + :return boolean: True if no further hook implementations should be invoked. + + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logstart(nodeid, location): + """ signal the start of running a single test item. + + This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_logfinish(nodeid, location): + """ signal the complete finish of running a single test item. + + This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_setup(item): + """ called before ``pytest_runtest_call(item)``. """ + + +def pytest_runtest_call(item): + """ called to execute the test ``item``. """ + + +def pytest_runtest_teardown(item, nextitem): + """ called after ``pytest_runtest_call``. + + :arg nextitem: the scheduled-to-be-next test item (None if no further + test item is scheduled). This argument can be used to + perform exact teardowns, i.e. calling just enough finalizers + so that nextitem only needs to call setup-functions. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item, call): + """ return a :py:class:`_pytest.runner.TestReport` object + for the given :py:class:`pytest.Item <_pytest.main.Item>` and + :py:class:`_pytest.runner.CallInfo`. 
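+
+    Example (a common hookwrapper sketch, illustrative rather than
+    upstream documentation)::
+
+        @pytest.hookimpl(hookwrapper=True)
+        def pytest_runtest_makereport(item, call):
+            outcome = yield
+            report = outcome.get_result()
+            if report.when == "call" and report.failed:
+                ...  # e.g. attach extra diagnostics to the report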
+ + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logreport(report): + """ process a test setup/call/teardown report relating to + the respective phase of executing a test. """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable(config, report): + """ + .. warning:: + This hook is experimental and subject to change between pytest releases, even + bug fixes. + + The intent is for this to be used by plugins maintained by the core-devs, such + as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal + 'resultlog' plugin. + + In the future it might become part of the public hook API. + + Serializes the given report object into a data structure suitable for sending + over the wire, e.g. converted to JSON. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable(config, data): + """ + .. warning:: + This hook is experimental and subject to change between pytest releases, even + bug fixes. + + The intent is for this to be used by plugins maintained by the core-devs, such + as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal + 'resultlog' plugin. + + In the future it might become part of the public hook API. + + Restores a report object previously serialized with pytest_report_to_serializable(). + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup(fixturedef, request): + """ performs fixture setup execution. + + :return: The return value of the call to the fixture function + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer(fixturedef, request): + """ called after fixture teardown, but before the cache is cleared so + the fixture result cache ``fixturedef.cached_result`` can + still be accessed.""" + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session): + """ called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_sessionfinish(session, exitstatus): + """ called after whole test run finished, right before returning the exit status to the system. + + :param _pytest.main.Session session: the pytest session object + :param int exitstatus: the status which pytest will return to the system + """ + + +def pytest_unconfigure(config): + """ called before test process is exited. + + :param _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare(config, op, left, right): + """return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. 
Note that all but the first line will
+    be indented slightly, the intention is for the first line to be a summary.
+
+    :param _pytest.config.Config config: pytest config object
+    """
+
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from _pytest_terminal)
+# -------------------------------------------------------------------------
+
+
+def pytest_report_header(config, startdir):
+    """ return a string or list of strings to be displayed as header info for terminal reporting.
+
+    :param _pytest.config.Config config: pytest config object
+    :param startdir: py.path object with the starting dir
+
+    .. note::
+
+        This function should be implemented only in plugins or ``conftest.py``
+        files situated at the tests root directory due to how pytest
+        :ref:`discovers plugins during startup <pluginorder>`.
+    """
+
+
+def pytest_report_collectionfinish(config, startdir, items):
+    """
+    .. versionadded:: 3.2
+
+    return a string or list of strings to be displayed after collection has finished successfully.
+
+    These strings will be displayed after the standard "collected X items" message.
+
+    :param _pytest.config.Config config: pytest config object
+    :param startdir: py.path object with the starting dir
+    :param items: list of pytest items that are going to be executed; this list should not be modified.
+    """
+
+
+@hookspec(firstresult=True)
+def pytest_report_teststatus(report, config):
+    """ return result-category, shortletter and verbose word for reporting.
+
+    :param _pytest.config.Config config: pytest config object
+
+    Stops at first non-None result, see :ref:`firstresult` """
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """Add a section to terminal summary reporting.
+
+    :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object
+    :param int exitstatus: the exit status that will be reported back to the OS
+    :param _pytest.config.Config config: pytest config object
+
+    .. versionadded:: 4.2
+        The ``config`` parameter.
+    """
+
+
+@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING)
+def pytest_logwarning(message, code, nodeid, fslocation):
+    """
+    .. deprecated:: 3.8
+
+        This hook will stop working in a future release.
+
+        pytest no longer triggers this hook, but the
+        terminal writer still implements it to display warnings issued by
+        :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be
+        an error in future releases.
+
+    process a warning specified by a message, a code string,
+    a nodeid and fslocation (both of which may be None
+    if the warning is not tied to a particular node/location).
+
+    .. note::
+        This hook is incompatible with ``hookwrapper=True``.
+    """
+
+
+@hookspec(historic=True)
+def pytest_warning_captured(warning_message, when, item):
+    """
+    Process a warning captured by the internal pytest warnings plugin.
+
+    :param warnings.WarningMessage warning_message:
+        The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
+        the same attributes as the parameters of :py:func:`warnings.showwarning`.
+
+    :param str when:
+        Indicates when the warning was captured. Possible values:
+
+        * ``"config"``: during pytest configuration/initialization stage.
+        * ``"collect"``: during test collection.
+        * ``"runtest"``: during test execution.
+ + :param pytest.Item|None item: + **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None`` + in a future release. + + The item being executed if ``when`` is ``"runtest"``, otherwise ``None``. + """ + + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror(excrepr, excinfo): + """ called for internal errors. """ + + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ + + +def pytest_exception_interact(node, call, report): + """called when an exception was raised which can potentially be + interactively handled. + + This hook is only called if an exception was raised + that is not an internal exception like ``skip.Exception``. + """ + + +def pytest_enter_pdb(config, pdb): + """ called upon pdb.set_trace(), can be used by plugins to take special + action just before the python debugger enters in interactive mode. + + :param _pytest.config.Config config: pytest config object + :param pdb.Pdb pdb: Pdb instance + """ + + +def pytest_leave_pdb(config, pdb): + """ called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param _pytest.config.Config config: pytest config object + :param pdb.Pdb pdb: Pdb instance + """ diff --git a/env_27/lib/python2.7/site-packages/_pytest/junitxml.py b/env_27/lib/python2.7/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000..122e0c7 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/junitxml.py @@ -0,0 +1,627 @@ +""" + report test results in JUnit-XML format, + for use with Jenkins and build integration servers. + + +Based on initial code from Ross Lawley. + +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import re +import sys +import time + +import py +import six + +import pytest +from _pytest import nodes +from _pytest.config import filename_arg + +# Python 2.X and 3.X compatibility +if sys.version_info[0] < 3: + from codecs import open + + +class Junit(py.xml.Namespace): + pass + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. 
The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_legal_chars = (0x09, 0x0A, 0x0D) +_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) +_legal_xml_re = [ + u"%s-%s" % (six.unichr(low), six.unichr(high)) + for (low, high) in _legal_ranges + if low < sys.maxunicode +] +_legal_xml_re = [six.unichr(x) for x in _legal_chars] + _legal_xml_re +illegal_xml_re = re.compile(u"[^%s]" % u"".join(_legal_xml_re)) +del _legal_chars +del _legal_ranges +del _legal_xml_re + +_py_ext_re = re.compile(r"\.py$") + + +def bin_xml_escape(arg): + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return u"#x%02X" % i + else: + return u"#x%04X" % i + + return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) + + +def merge_family(left, right): + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = {} +families["_base"] = {"testcase": ["classname", "name"]} +families["_base_legacy"] = {"testcase": ["file", "line", "url"]} + +# xUnit 1.x inherits legacy attributes +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes +families["xunit2"] = families["_base"] + + +class _NodeReporter(object): + def __init__(self, nodeid, xml): + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0 + self.properties = [] + self.nodes = [] + self.testcase = None + self.attrs = {} + + def append(self, node): + self.xml.add_stats(type(node).__name__) + self.nodes.append(node) + + def add_property(self, name, value): + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name, value): + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.properties + ] + ) + return "" + + def record_testreport(self, testreport): + assert not self.testcase + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # restore any user-defined attributes + + # Preserve legacy testcase behavior + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. 
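+        # (Illustrative, not an upstream comment: for family "xunit2" only
+        # the attributes listed in families["xunit2"]["testcase"], i.e.
+        # "classname" and "name", survive the filtering below; the "file",
+        # "line" and "url" attributes set above are dropped.)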
+        temp_attrs = {}
+        for key in self.attrs.keys():
+            if key in families[self.family]["testcase"]:
+                temp_attrs[key] = self.attrs[key]
+        self.attrs = temp_attrs
+
+    def to_xml(self):
+        testcase = Junit.testcase(time="%.3f" % self.duration, **self.attrs)
+        testcase.append(self.make_properties_node())
+        for node in self.nodes:
+            testcase.append(node)
+        return testcase
+
+    def _add_simple(self, kind, message, data=None):
+        data = bin_xml_escape(data)
+        node = kind(data, message=message)
+        self.append(node)
+
+    def write_captured_output(self, report):
+        content_out = report.capstdout
+        content_log = report.caplog
+        content_err = report.capstderr
+
+        if content_log or content_out:
+            if content_log and self.xml.logging == "system-out":
+                if content_out:
+                    # syncing stdout and the log-output is not done yet. It's
+                    # probably not worth the effort. Therefore, first the captured
+                    # stdout is shown and then the captured logs.
+                    content = "\n".join(
+                        [
+                            " Captured Stdout ".center(80, "-"),
+                            content_out,
+                            "",
+                            " Captured Log ".center(80, "-"),
+                            content_log,
+                        ]
+                    )
+                else:
+                    content = content_log
+            else:
+                content = content_out
+
+            if content:
+                tag = getattr(Junit, "system-out")
+                self.append(tag(bin_xml_escape(content)))
+
+        if content_log or content_err:
+            if content_log and self.xml.logging == "system-err":
+                if content_err:
+                    content = "\n".join(
+                        [
+                            " Captured Stderr ".center(80, "-"),
+                            content_err,
+                            "",
+                            " Captured Log ".center(80, "-"),
+                            content_log,
+                        ]
+                    )
+                else:
+                    content = content_log
+            else:
+                content = content_err
+
+            if content:
+                tag = getattr(Junit, "system-err")
+                self.append(tag(bin_xml_escape(content)))
+
+    def append_pass(self, report):
+        self.add_stats("passed")
+
+    def append_failure(self, report):
+        # msg = str(report.longrepr.reprtraceback.extraline)
+        if hasattr(report, "wasxfail"):
+            self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly")
+        else:
+            if hasattr(report.longrepr, "reprcrash"):
+                message = report.longrepr.reprcrash.message
+            elif isinstance(report.longrepr, six.string_types):
+                message = report.longrepr
+            else:
+                message = str(report.longrepr)
+            message = bin_xml_escape(message)
+            fail = Junit.failure(message=message)
+            fail.append(bin_xml_escape(report.longrepr))
+            self.append(fail)
+
+    def append_collect_error(self, report):
+        # msg = str(report.longrepr.reprtraceback.extraline)
+        self.append(
+            Junit.error(bin_xml_escape(report.longrepr), message="collection failure")
+        )
+
+    def append_collect_skipped(self, report):
+        self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
+
+    def append_error(self, report):
+        if report.when == "teardown":
+            msg = "test teardown failure"
+        else:
+            msg = "test setup failure"
+        self._add_simple(Junit.error, msg, report.longrepr)
+
+    def append_skipped(self, report):
+        if hasattr(report, "wasxfail"):
+            self._add_simple(Junit.skipped, "expected test failure", report.wasxfail)
+        else:
+            filename, lineno, skipreason = report.longrepr
+            if skipreason.startswith("Skipped: "):
+                skipreason = skipreason[9:]
+            details = "%s:%s: %s" % (filename, lineno, skipreason)
+
+            self.append(
+                Junit.skipped(
+                    bin_xml_escape(details),
+                    type="pytest.skip",
+                    message=bin_xml_escape(skipreason),
+                )
+            )
+            self.write_captured_output(report)
+
+    def finalize(self):
+        data = self.to_xml().unicode(indent=0)
+        self.__dict__.clear()
+        self.to_xml = lambda: py.xml.raw(data)
+
+
+@pytest.fixture
+def record_property(request):
+    """Add extra properties to the calling test.
+ User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + The fixture is callable with ``(name, value)``, with value being automatically + xml-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + + def append_property(name, value): + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request): + """Add extra xml attributes to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being + automatically xml-encoded + """ + from _pytest.warning_types import PytestWarning + + request.node.warn(PytestWarning("record_xml_attribute is an experimental feature")) + + # Declare noop + def add_attr_noop(name, value): + pass + + attr_func = add_attr_noop + xml = getattr(request.config, "_xml", None) + + if xml is not None and xml.family != "xunit1": + request.node.warn( + PytestWarning( + "record_xml_attribute is incompatible with junit_family: " + "%s (use: legacy|xunit1)" % xml.family + ) + ) + elif xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|system-out|system-err", + default="no", + ) # choices=['no', 'stdout', 'stderr']) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit1", + ) + + +def pytest_configure(config): + xmlpath = config.option.xmlpath + # prevent opening xmllog on slave nodes (xdist) + if xmlpath and not hasattr(config, "slaveinput"): + config._xml = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + config.getini("junit_family"), + ) + config.pluginmanager.register(config._xml) + + +def pytest_unconfigure(config): + xml = getattr(config, "_xml", None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address): + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + try: + names.remove("()") + except ValueError: + pass + # convert file path to dotted path + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = _py_ext_re.sub("", names[0]) + # put any params back + names[-1] += possible_open_bracket + params + return names + + +class LogXML(object): + def __init__( + self, + logfile, + prefix, + suite_name="pytest", + logging="no", + report_duration="total", + family="xunit1", + ): + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + 
self.prefix = prefix
+        self.suite_name = suite_name
+        self.logging = logging
+        self.report_duration = report_duration
+        self.family = family
+        self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
+        self.node_reporters = {}  # nodeid -> _NodeReporter
+        self.node_reporters_ordered = []
+        self.global_properties = []
+        # List of reports that failed on call but teardown is pending.
+        self.open_reports = []
+        self.cnt_double_fail_tests = 0
+
+        # Replaces convenience family with real family
+        if self.family == "legacy":
+            self.family = "xunit1"
+
+    def finalize(self, report):
+        nodeid = getattr(report, "nodeid", report)
+        # local hack to handle xdist report order
+        slavenode = getattr(report, "node", None)
+        reporter = self.node_reporters.pop((nodeid, slavenode))
+        if reporter is not None:
+            reporter.finalize()
+
+    def node_reporter(self, report):
+        nodeid = getattr(report, "nodeid", report)
+        # local hack to handle xdist report order
+        slavenode = getattr(report, "node", None)
+
+        key = nodeid, slavenode
+
+        if key in self.node_reporters:
+            # TODO: breaks for --dist=each
+            return self.node_reporters[key]
+
+        reporter = _NodeReporter(nodeid, self)
+
+        self.node_reporters[key] = reporter
+        self.node_reporters_ordered.append(reporter)
+
+        return reporter
+
+    def add_stats(self, key):
+        if key in self.stats:
+            self.stats[key] += 1
+
+    def _opentestcase(self, report):
+        reporter = self.node_reporter(report)
+        reporter.record_testreport(report)
+        return reporter
+
+    def pytest_runtest_logreport(self, report):
+        """handle a setup/call/teardown report, generating the appropriate
+        xml tags as necessary.
+
+        note: due to plugins like xdist, this hook may be called in interlaced
+        order with reports from other nodes. for example:
+
+        usual call order:
+            -> setup node1
+            -> call node1
+            -> teardown node1
+            -> setup node2
+            -> call node2
+            -> teardown node2
+
+        possible call order in xdist:
+            -> setup node1
+            -> call node1
+            -> setup node2
+            -> call node2
+            -> teardown node2
+            -> teardown node1
+        """
+        close_report = None
+        if report.passed:
+            if report.when == "call":  # ignore setup/teardown
+                reporter = self._opentestcase(report)
+                reporter.append_pass(report)
+        elif report.failed:
+            if report.when == "teardown":
+                # The following vars are needed when xdist plugin is used
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (
+                        rep
+                        for rep in self.open_reports
+                        if (
+                            rep.nodeid == report.nodeid
+                            and getattr(rep, "item_index", None) == report_ii
+                            and getattr(rep, "worker_id", None) == report_wid
+                        )
+                    ),
+                    None,
+                )
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
+            reporter = self._opentestcase(report)
+            if report.when == "call":
+                reporter.append_failure(report)
+                self.open_reports.append(report)
+            else:
+                reporter.append_error(report)
+        elif report.skipped:
+            reporter = self._opentestcase(report)
+            reporter.append_skipped(report)
+        self.update_testcase_duration(report)
+        if report.when == "teardown":
+            reporter = self._opentestcase(report)
+            reporter.write_captured_output(report)
+
+            for propname, propvalue in report.user_properties:
+                reporter.add_property(propname, propvalue)
+
+            self.finalize(report)
+            report_wid = getattr(report, "worker_id", None)
+            report_ii = getattr(report, "item_index", None)
+            close_report = next(
+                (
+                    rep
+                    for rep in self.open_reports
+                    if (
+                        rep.nodeid == report.nodeid
+                        and getattr(rep, "item_index", None) == report_ii
+                        and getattr(rep, "worker_id", None) == report_wid
+                    )
+                ),
+                None,
+            )
+            if close_report:
+                self.open_reports.remove(close_report)
+
+    def update_testcase_duration(self, report):
+        """accumulates total duration for nodeid from given report and updates
+        the Junit.testcase with the new total if already created.
+        """
+        if self.report_duration == "total" or report.when == self.report_duration:
+            reporter = self.node_reporter(report)
+            reporter.duration += getattr(report, "duration", 0.0)
+
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            reporter = self._opentestcase(report)
+            if report.failed:
+                reporter.append_collect_error(report)
+            else:
+                reporter.append_collect_skipped(report)
+
+    def pytest_internalerror(self, excrepr):
+        reporter = self.node_reporter("internal")
+        reporter.attrs.update(classname="pytest", name="internal")
+        reporter._add_simple(Junit.error, "internal error", excrepr)
+
+    def pytest_sessionstart(self):
+        self.suite_start_time = time.time()
+
+    def pytest_sessionfinish(self):
+        dirname = os.path.dirname(os.path.abspath(self.logfile))
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
+        logfile = open(self.logfile, "w", encoding="utf-8")
+        suite_stop_time = time.time()
+        suite_time_delta = suite_stop_time - self.suite_start_time
+
+        numtests = (
+            self.stats["passed"]
+            + self.stats["failure"]
+            + self.stats["skipped"]
+            + self.stats["error"]
+            - self.cnt_double_fail_tests
+        )
+        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
+        logfile.write(
+            Junit.testsuite(
+                self._get_global_properties_node(),
+                [x.to_xml() for x in self.node_reporters_ordered],
+                name=self.suite_name,
+                errors=self.stats["error"],
+                failures=self.stats["failure"],
+                skipped=self.stats["skipped"],
+                tests=numtests,
+                time="%.3f" % suite_time_delta,
+            ).unicode(indent=0)
+        )
+        logfile.close()
+
+    def pytest_terminal_summary(self, terminalreporter):
+        terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
+
+    def add_global_property(self, name, value):
+        self.global_properties.append((str(name), bin_xml_escape(value)))
+
+    def _get_global_properties_node(self):
+        """Return a Junit node containing custom properties, if any.
+        """
+        if self.global_properties:
+            return Junit.properties(
+                [
+                    Junit.property(name=name, value=value)
+                    for name, value in self.global_properties
+                ]
+            )
+        return ""
diff --git a/env_27/lib/python2.7/site-packages/_pytest/logging.py b/env_27/lib/python2.7/site-packages/_pytest/logging.py
new file mode 100644
index 0000000..757cb27
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/_pytest/logging.py
@@ -0,0 +1,662 @@
+""" Access and control log capturing. """
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+import re
+from contextlib import contextmanager
+
+import py
+import six
+
+import pytest
+from _pytest.compat import dummy_context_manager
+from _pytest.config import create_terminal_writer
+from _pytest.pathlib import Path
+
+DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
+DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
+
+
+class ColoredLevelFormatter(logging.Formatter):
+    """
+    Colorize the %(levelname)..s part of the log format passed to __init__.
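+
+    For example (illustrative, not upstream documentation): with a format
+    such as ``"%(levelname)-8s %(message)s"``, a WARNING record is rendered
+    with its ``WARNING `` column wrapped in yellow ANSI markup while the
+    rest of the line is left untouched.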
+ """ + + LOGLEVEL_COLOROPTS = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)") + + def __init__(self, terminalwriter, *args, **kwargs): + super(ColoredLevelFormatter, self).__init__(*args, **kwargs) + if six.PY2: + self._original_fmt = self._fmt + else: + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping = {} + + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + formatted_levelname = levelname_fmt % { + "levelname": logging.getLevelName(level) + } + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record): + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + if six.PY2: + self._fmt = fmt + else: + self._style._fmt = fmt + return super(ColoredLevelFormatter, self).format(record) + + +def get_option_ini(config, *names): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser): + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--no-print-logs", + dest="log_print", + action="store_const", + const=False, + default=True, + type="bool", + help="disable printing caught logs on failed tests.", + ) + add_option_ini( + "--log-level", + dest="log_level", + default=None, + help="logging level used by the logging module", + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='enable log display during test run (also known as "live logging").', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." 
+    )
+    add_option_ini(
+        "--log-cli-format",
+        dest="log_cli_format",
+        default=None,
+        help="log format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-cli-date-format",
+        dest="log_cli_date_format",
+        default=None,
+        help="log date format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-file",
+        dest="log_file",
+        default=None,
+        help="path to a file where logging will be written to.",
+    )
+    add_option_ini(
+        "--log-file-level",
+        dest="log_file_level",
+        default=None,
+        help="log file logging level.",
+    )
+    add_option_ini(
+        "--log-file-format",
+        dest="log_file_format",
+        default=DEFAULT_LOG_FORMAT,
+        help="log format as used by the logging module.",
+    )
+    add_option_ini(
+        "--log-file-date-format",
+        dest="log_file_date_format",
+        default=DEFAULT_LOG_DATE_FORMAT,
+        help="log date format as used by the logging module.",
+    )
+
+
+@contextmanager
+def catching_logs(handler, formatter=None, level=None):
+    """Context manager that prepares the whole logging machinery properly."""
+    root_logger = logging.getLogger()
+
+    if formatter is not None:
+        handler.setFormatter(formatter)
+    if level is not None:
+        handler.setLevel(level)
+
+    # Adding the same handler twice would confuse logging system.
+    # Just don't do that.
+    add_new_handler = handler not in root_logger.handlers
+
+    if add_new_handler:
+        root_logger.addHandler(handler)
+    if level is not None:
+        orig_level = root_logger.level
+        root_logger.setLevel(min(orig_level, level))
+    try:
+        yield handler
+    finally:
+        if level is not None:
+            root_logger.setLevel(orig_level)
+        if add_new_handler:
+            root_logger.removeHandler(handler)
+
+
+class LogCaptureHandler(logging.StreamHandler):
+    """A logging handler that stores log records and the log text."""
+
+    def __init__(self):
+        """Creates a new log handler."""
+        logging.StreamHandler.__init__(self, py.io.TextIO())
+        self.records = []
+
+    def emit(self, record):
+        """Keep the log records in a list in addition to the log text."""
+        self.records.append(record)
+        logging.StreamHandler.emit(self, record)
+
+    def reset(self):
+        self.records = []
+        self.stream = py.io.TextIO()
+
+
+class LogCaptureFixture(object):
+    """Provides access and control of log capturing."""
+
+    def __init__(self, item):
+        """Creates a new funcarg."""
+        self._item = item
+        # dict of log name -> log level
+        self._initial_log_levels = {}  # Dict[str, int]
+
+    def _finalize(self):
+        """Finalizes the fixture.
+
+        This restores the log levels changed by :meth:`set_level`.
+        """
+        # restore log levels
+        for logger_name, level in self._initial_log_levels.items():
+            logger = logging.getLogger(logger_name)
+            logger.setLevel(level)
+
+    @property
+    def handler(self):
+        """
+        :rtype: LogCaptureHandler
+        """
+        return self._item.catch_log_handler
+
+    def get_records(self, when):
+        """
+        Get the logging records for one of the possible test phases.
+
+        :param str when:
+            Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
+
+        :rtype: List[logging.LogRecord]
+        :return: the list of captured records at the given stage
+
+        .. versionadded:: 3.4
versionadded:: 3.4 + """ + handler = self._item.catch_log_handlers.get(when) + if handler: + return handler.records + else: + return [] + + @property + def text(self): + """Returns the log text.""" + return self.handler.stream.getvalue() + + @property + def records(self): + """Returns the list of log records.""" + return self.handler.records + + @property + def record_tuples(self): + """Returns a list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self): + """Returns a list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list + are all interpolated. + Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with + levels, timestamps, etc, making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments + to the logging functions) is not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self): + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level, logger=None): + """Sets the level for capturing of logs. The level will be restored to its previous value at the end of + the test. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be restored to their initial values at the + end of the test. + """ + logger_name = logger + logger = logging.getLogger(logger_name) + # save the original log-level to restore it during teardown + self._initial_log_levels.setdefault(logger_name, logger.level) + logger.setLevel(level) + + @contextmanager + def at_level(self, level, logger=None): + """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the + level is restored to its original value. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + """ + logger = logging.getLogger(logger) + orig_level = logger.level + logger.setLevel(level) + try: + yield + finally: + logger.setLevel(orig_level) + + +@pytest.fixture +def caplog(request): + """Access and control log capturing. 
+ + Captured logs are available through the following properties/methods:: + + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node) + yield result + result._finalize() + + +def get_actual_log_level(config, *setting_names): + """Return the actual logging level.""" + + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return + + if isinstance(log_level, six.string_types): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError: + # Python logging does not recognise this as a logging level + raise pytest.UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) + + +# run after terminalreporter/capturemanager are configured +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin(object): + """Attaches to the logging module and captures log messages for each test. + """ + + def __init__(self, config): + """Creates a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # enable verbose output automatically if live logging is enabled + if self._log_cli_enabled() and config.getoption("verbose") < 1: + config.option.verbose = 1 + + self.print_logs = get_option_ini(config, "log_print") + self.formatter = logging.Formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + ) + self.log_level = get_actual_log_level(config, "log_level") + + self.log_file_level = get_actual_log_level(config, "log_file_level") + self.log_file_format = get_option_ini(config, "log_file_format", "log_format") + self.log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + self.log_file_formatter = logging.Formatter( + self.log_file_format, datefmt=self.log_file_date_format + ) + + log_file = get_option_ini(config, "log_file") + if log_file: + self.log_file_handler = logging.FileHandler( + log_file, mode="w", encoding="UTF-8" + ) + self.log_file_handler.setFormatter(self.log_file_formatter) + else: + self.log_file_handler = None + + self.log_cli_handler = None + + self.live_logs_context = lambda: dummy_context_manager() + # Note that the lambda for the live_logs_context is needed because + # live_logs_context can otherwise not be entered multiple times due + # to limitations of contextlib.contextmanager. + + if self._log_cli_enabled(): + self._setup_cli_logging() + + def _setup_cli_logging(self): + config = self._config + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return + + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. 
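# A minimal sketch (assuming this plugin is active in a normal pytest run;
# test and logger names are hypothetical) of using the caplog fixture
# defined here:
import logging

def test_emits_warning(caplog):
    caplog.set_level(logging.INFO)      # level is restored after the test
    logging.getLogger("myapp").warning("disk %s is full", "sda1")
    assert caplog.record_tuples == [
        ("myapp", logging.WARNING, "disk sda1 is full")
    ]
    assert "disk sda1 is full" in caplog.messages   # interpolated, unadorned
    caplog.clear()                      # drop records and captured text
    assert caplog.records == []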
+ log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + log_cli_format = get_option_ini(config, "log_cli_format", "log_format") + log_cli_date_format = get_option_ini( + config, "log_cli_date_format", "log_date_format" + ) + if ( + config.option.color != "no" + and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format) + ): + log_cli_formatter = ColoredLevelFormatter( + create_terminal_writer(config), + log_cli_format, + datefmt=log_cli_date_format, + ) + else: + log_cli_formatter = logging.Formatter( + log_cli_format, datefmt=log_cli_date_format + ) + log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level") + self.log_cli_handler = log_cli_handler + self.live_logs_context = lambda: catching_logs( + log_cli_handler, formatter=log_cli_formatter, level=log_cli_level + ) + + def set_log_path(self, fname): + """Public method, which can set filename parameter for + Logging.FileHandler(). Also creates parent directory if + it does not exist. + + .. warning:: + Please considered as an experimental API. + """ + fname = Path(fname) + + if not fname.is_absolute(): + fname = Path(self._config.rootdir, fname) + + if not fname.parent.exists(): + fname.parent.mkdir(exist_ok=True, parents=True) + + self.log_file_handler = logging.FileHandler( + str(fname), mode="w", encoding="UTF-8" + ) + self.log_file_handler.setFormatter(self.log_file_formatter) + + def _log_cli_enabled(self): + """Return True if log_cli should be considered enabled, either explicitly + or because --log-cli-level was given in the command-line. + """ + return self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self): + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("collection") + + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @contextmanager + def _runtest_for(self, item, when): + with self._runtest_for_main(item, when): + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @contextmanager + def _runtest_for_main(self, item, when): + """Implements the internals of pytest_runtest_xxx() hook.""" + with catching_logs( + LogCaptureHandler(), formatter=self.formatter, level=self.log_level + ) as log_handler: + if self.log_cli_handler: + self.log_cli_handler.set_when(when) + + if item is None: + yield # run the test + return + + if not hasattr(item, "catch_log_handlers"): + item.catch_log_handlers = {} + item.catch_log_handlers[when] = log_handler + item.catch_log_handler = log_handler + try: + yield # run test + finally: + if when == "teardown": + del item.catch_log_handler + del item.catch_log_handlers + + if self.print_logs: + # Add a captured log section to the report. 
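# Sketch of switching live logging on from the command line, per
# _log_cli_enabled above (the test module below is hypothetical):
#
#   pytest --log-cli-level=INFO test_live.py
#
import logging

def test_live_output():
    # With live logging enabled this message is written straight to the
    # terminal under a "live log call" section while the test runs.
    logging.getLogger("myapp").info("progress: step 1 of 3")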
+ log = log_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self._runtest_for(item, "setup"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self._runtest_for(item, "call"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self._runtest_for(item, "teardown"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logstart(self): + if self.log_cli_handler: + self.log_cli_handler.reset() + with self._runtest_for(None, "start"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logfinish(self): + with self._runtest_for(None, "finish"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logreport(self): + with self._runtest_for(None, "logreport"): + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self): + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionfinish") + if self.log_file_handler is not None: + try: + with catching_logs( + self.log_file_handler, level=self.log_file_level + ): + yield + finally: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + else: + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self): + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionstart") + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session): + """Runs all collected test items.""" + with self.live_logs_context(): + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # run all the tests + else: + yield # run all the tests + + +class _LiveLoggingStreamHandler(logging.StreamHandler): + """ + Custom StreamHandler used by the live logging feature: it will write a newline before the first log message + in each test. + + During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured + and won't appear in the terminal. 
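# The hooks above all use the hookwrapper protocol: code before `yield`
# runs first, the wrapped hook runs at the yield point, and the rest runs
# afterwards. The same pattern in a user conftest.py (the timing logic is
# hypothetical, not part of this plugin):
import time
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    start = time.time()
    yield                       # the actual test call happens here
    print("%s took %.3fs" % (item.nodeid, time.time() - start))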
+ """ + + def __init__(self, terminal_reporter, capture_manager): + """ + :param _pytest.terminal.TerminalReporter terminal_reporter: + :param _pytest.capture.CaptureManager capture_manager: + """ + logging.StreamHandler.__init__(self, stream=terminal_reporter) + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self): + """Reset the handler; should be called before the start of each test""" + self._first_record_emitted = False + + def set_when(self, when): + """Prepares for the given test phase (setup/call/teardown)""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record): + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else dummy_context_manager() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + logging.StreamHandler.emit(self, record) diff --git a/env_27/lib/python2.7/site-packages/_pytest/main.py b/env_27/lib/python2.7/site-packages/_pytest/main.py new file mode 100644 index 0000000..19553ca --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/main.py @@ -0,0 +1,773 @@ +""" core implementation of testing process: init, session, runtest loop. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import fnmatch +import functools +import os +import pkgutil +import sys +import warnings + +import attr +import py +import six + +import _pytest._code +from _pytest import nodes +from _pytest.config import directory_arg +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.deprecated import PYTEST_CONFIG_GLOBAL +from _pytest.outcomes import exit +from _pytest.runner import collect_one_node + +# exitcodes for the command line +EXIT_OK = 0 +EXIT_TESTSFAILED = 1 +EXIT_INTERRUPTED = 2 +EXIT_INTERNALERROR = 3 +EXIT_USAGEERROR = 4 +EXIT_NOTESTSCOLLECTED = 5 + + +def pytest_addoption(parser): + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + type="args", + default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) + # parser.addini("dirpatterns", + # "patterns specifying possible locations of test files", + # type="linelist", default=["**/test_*.txt", + # "**/test_*.py", "**/*_test.py"] + # ) + group = parser.getgroup("general", "running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ), + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict", + action="store_true", + help="marks not registered in configuration file raise errors.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + 
dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + action="store_true", + help="only collect tests, don't execute them.", + ), + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="ignore path pattern during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item during collection (multi-allowed).", + ) + # when changing this to --conf-cut-dir, config.py Conftest.setinitial + # needs upgrading as well + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + metavar="dir", + help=( + "base temporary directory for this test run." 
+ "(warning: this directory is removed if it exists)" + ), + ) + + +class _ConfigDeprecated(object): + def __init__(self, config): + self.__dict__["_config"] = config + + def __getattr__(self, attr): + warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) + return getattr(self._config, attr) + + def __setattr__(self, attr, val): + warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) + return setattr(self._config, attr, val) + + def __repr__(self): + return "{}({!r})".format(type(self).__name__, self._config) + + +def pytest_configure(config): + __import__("pytest").config = _ConfigDeprecated(config) # compatibility + + +def wrap_session(config, doit): + """Skeleton command line program""" + session = Session(config) + session.exitstatus = EXIT_OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + raise + except Failed: + session.exitstatus = EXIT_TESTSFAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus = EXIT_INTERRUPTED + if initstate <= 2 and isinstance(excinfo.value, exit.Exception): + sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except: # noqa + excinfo = _pytest._code.ExceptionInfo.from_current() + config.notify_exception(excinfo, config.option) + session.exitstatus = EXIT_INTERNALERROR + if excinfo.errisinstance(SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + excinfo = None # Explicitly break reference cycle. + session.startdir.chdir() + if initstate >= 2: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config): + return wrap_session(config, _main) + + +def _main(config, session): + """ default command line protocol for initialization, session, + running tests and reporting. 
""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return EXIT_TESTSFAILED + elif session.testscollected == 0: + return EXIT_NOTESTSCOLLECTED + + +def pytest_collection(session): + return session.perform_collect() + + +def pytest_runtestloop(session): + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted("%d errors during collection" % session.testsfailed) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path): + """Attempts to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script""" + bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") + if not bindir.isdir(): + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any([fname.basename in activates for fname in bindir.listdir()]) + + +def pytest_ignore_collect(path, config): + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + + if py.path.local(path) in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=path.dirpath() + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend([py.path.local(x) for x in excludeglobopt]) + + if any( + fnmatch.fnmatch(six.text_type(path), six.text_type(glob)) + for glob in ignore_globs + ): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(path): + return True + + return False + + +def pytest_collection_modifyitems(items, config): + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@contextlib.contextmanager +def _patched_find_module(): + """Patch bug in pkgutil.ImpImporter.find_module + + When using pkgutil.find_loader on python<3.4 it removes symlinks + from the path due to a call to os.path.realpath. This is not consistent + with actually doing the import (in these versions, pkgutil and __import__ + did not share the same underlying code). This can break conftest + discovery for pytest where symlinks are involved. + + The only supported python<3.4 by pytest is python 2.7. 
+ """ + if six.PY2: # python 3.4+ uses importlib instead + + def find_module_patched(self, fullname, path=None): + # Note: we ignore 'path' argument since it is only used via meta_path + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + # original: path = [os.path.realpath(self.path)] + path = [self.path] + try: + file, filename, etc = pkgutil.imp.find_module(subname, path) + except ImportError: + return None + return pkgutil.ImpLoader(fullname, file, filename, etc) + + old_find_module = pkgutil.ImpImporter.find_module + pkgutil.ImpImporter.find_module = find_module_patched + try: + yield + finally: + pkgutil.ImpImporter.find_module = old_find_module + else: + yield + + +class FSHookProxy(object): + def __init__(self, fspath, pm, remove_mods): + self.fspath = fspath + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class NoMatch(Exception): + """ raised if matching cannot locate a matching names. """ + + +class Interrupted(KeyboardInterrupt): + """ signals an interrupted test run. """ + + __module__ = "builtins" # for py3 + + +class Failed(Exception): + """ signals a stop as failed test run. """ + + +@attr.s +class _bestrelpath_cache(dict): + path = attr.ib() + + def __missing__(self, path): + r = self.path.bestrelpath(path) + self[path] = r + return r + + +class Session(nodes.FSCollector): + Interrupted = Interrupted + Failed = Failed + + def __init__(self, config): + nodes.FSCollector.__init__( + self, config.rootdir, parent=None, config=config, session=self, nodeid="" + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop = False + self.shouldfail = False + self.trace = config.trace.root.get("collection") + self._norecursepatterns = config.getini("norecursedirs") + self.startdir = py.path.local() + self._initialpaths = frozenset() + # Keep track of any collected nodes in here, so we don't duplicate fixtures + self._node_cache = {} + self._bestrelpathcache = _bestrelpath_cache(config.rootdir) + # Dirnames of pkgs with dunder-init files. 
+ self._pkg_roots = {} + + self.config.pluginmanager.register(self, name="session") + + def __repr__(self): + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + def _node_location_to_relpath(self, node_path): + # bestrelpath is a quite slow function + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self): + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report): + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path): + return path in self._initialpaths + + def gethookproxy(self, fspath): + # check if we have the common case of running + # hooks with all conftest.py files + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy + + def perform_collect(self, args=None, genitems=True): + hook = self.config.hook + try: + items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + self.testscollected = len(items) + return items + + def _perform_collect(self, args, genitems): + if args is None: + args = self.config.args + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + self._notfound = [] + initialpaths = [] + self._initialparts = [] + self.items = items = [] + for arg in args: + parts = self._parsearg(arg) + self._initialparts.append(parts) + initialpaths.append(parts[0]) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, exc in self._notfound: + line = "(no name %r in any of %r)" % (arg, exc.args[0]) + errors.append("not found: %s\n%s" % (arg, line)) + # XXX: test this + raise UsageError(*errors) + if not genitems: + return rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + return items + + def collect(self): + for initialpart in self._initialparts: + arg = "::".join(map(str, initialpart)) + self.trace("processing argument", arg) + self.trace.root.indent += 1 + try: + for x in self._collect(arg): + yield x + except NoMatch: + # we are inside a make_report hook so + # we cannot directly pass through the exception + self._notfound.append((arg, sys.exc_info()[1])) + + self.trace.root.indent -= 1 + + def _collect(self, arg): + from _pytest.python import Package + + names = self._parsearg(arg) + argpath = names.pop(0) + + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. 
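# The maxfail bookkeeping in pytest_runtest_logreport above, reduced to
# its core: count real failures and produce a stop message once the limit
# is reached (standalone sketch, names are hypothetical):
def stop_message(failed_count, maxfail):
    if maxfail and failed_count >= maxfail:
        return "stopping after %d failures" % failed_count
    return None

assert stop_message(2, maxfail=2) == "stopping after 2 failures"
assert stop_message(5, maxfail=0) is None   # maxfail=0 disables the limit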
+ # No point in finding packages when collecting doctests + if not self.config.getoption("doctestmodules", False): + pm = self.config.pluginmanager + for parent in reversed(argpath.parts()): + if pm._confcutdir and pm._confcutdir.relto(parent): + break + + if parent.isdir(): + pkginit = parent.join("__init__.py") + if pkginit.isfile(): + if pkginit not in self._node_cache: + col = self._collectfile(pkginit, handle_dupes=False) + if col: + if isinstance(col[0], Package): + self._pkg_roots[parent] = col[0] + # always store a list in the cache, matchnodes expects it + self._node_cache[col[0].fspath] = [col[0]] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. + if argpath.check(dir=1): + assert not names, "invalid arg %r" % (arg,) + + seen_dirs = set() + for path in argpath.visit( + fil=self._visit_filter, rec=self._recurse, bf=True, sort=True + ): + dirpath = path.dirpath() + if dirpath not in seen_dirs: + # Collect packages first. + seen_dirs.add(dirpath) + pkginit = dirpath.join("__init__.py") + if pkginit.exists(): + for x in self._collectfile(pkginit): + yield x + if isinstance(x, Package): + self._pkg_roots[dirpath] = x + if dirpath in self._pkg_roots: + # Do not collect packages here. + continue + + for x in self._collectfile(path): + key = (type(x), x.fspath) + if key in self._node_cache: + yield self._node_cache[key] + else: + self._node_cache[key] = x + yield x + else: + assert argpath.check(file=1) + + if argpath in self._node_cache: + col = self._node_cache[argpath] + else: + collect_root = self._pkg_roots.get(argpath.dirname, self) + col = collect_root._collectfile(argpath, handle_dupes=False) + if col: + self._node_cache[argpath] = col + m = self.matchnodes(col, names) + # If __init__.py was the only file requested, then the matched node will be + # the corresponding Package, and the first yielded item will be the __init__ + # Module itself, so just use that. If this special case isn't taken, then all + # the files in the package will be yielded. 
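# _collect above reuses already-built nodes through a keyed cache, so a
# path visited twice yields the same node object. The same idea as a
# standalone generator (names are hypothetical):
def cached_nodes(paths, make_node):
    cache = {}
    for path in paths:
        if path not in cache:
            cache[path] = make_node(path)
        yield cache[path]

nodes = list(cached_nodes(["a.py", "b.py", "a.py"], make_node=lambda p: [p]))
assert nodes[0] is nodes[2]   # the repeated path maps to the same node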
+ if argpath.basename == "__init__.py": + yield next(m[0].collect()) + return + for y in m: + yield y + + def _collectfile(self, path, handle_dupes=True): + assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % ( + path, + path.isdir(), + path.exists(), + path.islink(), + ) + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if path in duplicate_paths: + return () + else: + duplicate_paths.add(path) + + return ihook.pytest_collect_file(path=path, parent=self) + + def _recurse(self, dirpath): + if dirpath.basename == "__pycache__": + return False + ihook = self.gethookproxy(dirpath.dirpath()) + if ihook.pytest_ignore_collect(path=dirpath, config=self.config): + return False + for pat in self._norecursepatterns: + if dirpath.check(fnmatch=pat): + return False + ihook = self.gethookproxy(dirpath) + ihook.pytest_collect_directory(path=dirpath, parent=self) + return True + + if six.PY2: + + @staticmethod + def _visit_filter(f): + return f.check(file=1) and not f.strpath.endswith("*.pyc") + + else: + + @staticmethod + def _visit_filter(f): + return f.check(file=1) + + def _tryconvertpyarg(self, x): + """Convert a dotted module name to path.""" + try: + with _patched_find_module(): + loader = pkgutil.find_loader(x) + except ImportError: + return x + if loader is None: + return x + # This method is sometimes invoked when AssertionRewritingHook, which + # does not define a get_filename method, is already in place: + try: + with _patched_find_module(): + path = loader.get_filename(x) + except AttributeError: + # Retrieve path from AssertionRewritingHook: + path = loader.modules[x][0].co_filename + if loader.is_package(x): + path = os.path.dirname(path) + return path + + def _parsearg(self, arg): + """ return (fspath, names) tuple after checking the file exists. 
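# _parsearg below splits a selection such as
# "tests/test_x.py::TestX::test_one" on "::" into the file path plus the
# trailing names that matchnodes resolves (sketch, hypothetical helper):
def split_nodeid(arg):
    parts = arg.split("::")
    return parts[0], parts[1:]

assert split_nodeid("tests/test_x.py::TestX::test_one") == (
    "tests/test_x.py", ["TestX", "test_one"])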
""" + parts = str(arg).split("::") + if self.config.option.pyargs: + parts[0] = self._tryconvertpyarg(parts[0]) + relpath = parts[0].replace("/", os.sep) + path = self.config.invocation_dir.join(relpath, abs=True) + if not path.check(): + if self.config.option.pyargs: + raise UsageError( + "file or package not found: " + arg + " (missing __init__.py?)" + ) + raise UsageError("file not found: " + arg) + parts[0] = path.realpath() + return parts + + def matchnodes(self, matching, names): + self.trace("matchnodes", matching, names) + self.trace.root.indent += 1 + nodes = self._matchnodes(matching, names) + num = len(nodes) + self.trace("matchnodes finished -> ", num, "nodes") + self.trace.root.indent -= 1 + if num == 0: + raise NoMatch(matching, names[:1]) + return nodes + + def _matchnodes(self, matching, names): + if not matching or not names: + return matching + name = names[0] + assert name + nextnames = names[1:] + resultnodes = [] + for node in matching: + if isinstance(node, nodes.Item): + if not names: + resultnodes.append(node) + continue + assert isinstance(node, nodes.Collector) + key = (type(node), node.nodeid) + if key in self._node_cache: + rep = self._node_cache[key] + else: + rep = collect_one_node(node) + self._node_cache[key] = rep + if rep.passed: + has_matched = False + for x in rep.result: + # TODO: remove parametrized workaround once collection structure contains parametrization + if x.name == name or x.name.split("[")[0] == name: + resultnodes.extend(self.matchnodes([x], nextnames)) + has_matched = True + # XXX accept IDs that don't have "()" for class instances + if not has_matched and len(rep.result) == 1 and x.name == "()": + nextnames.insert(0, name) + resultnodes.extend(self.matchnodes([x], nextnames)) + else: + # report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134) + node.ihook.pytest_collectreport(report=rep) + return resultnodes + + def genitems(self, node): + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + rep = collect_one_node(node) + if rep.passed: + for subnode in rep.result: + for x in self.genitems(subnode): + yield x + node.ihook.pytest_collectreport(report=rep) diff --git a/env_27/lib/python2.7/site-packages/_pytest/mark/__init__.py b/env_27/lib/python2.7/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..e7f08e1 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,163 @@ +""" generic mechanism for marking and selecting python functions. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from .legacy import matchkeyword +from .legacy import matchmark +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import UsageError + +__all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"] + + +def param(*values, **kw): + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. 
code-block:: python + + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: variable args of the values of the parameter set, in order. + :keyword marks: a single mark or a list of marks to be applied to this parameter set. + :keyword str id: the id to attribute to this parameter set. + """ + return ParameterSet.param(*values, **kw) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="only run tests matching given mark expression. " + "example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") + + +def pytest_cmdline_main(config): + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + +pytest_cmdline_main.tryfirst = True + + +def deselect_by_keyword(items, config): + keywordexpr = config.option.keyword.lstrip() + if keywordexpr.startswith("-"): + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and not matchkeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + if selectuntil: + keywordexpr = None + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def deselect_by_mark(items, config): + matchexpr = config.option.markexpr + if not matchexpr: + return + + remaining = [] + deselected = [] + for item in items: + if matchmark(item, matchexpr): + remaining.append(item) + else: + deselected.append(item) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def pytest_collection_modifyitems(items, config): + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config): + config._old_mark_config = MARK_GEN._config + if config.option.strict: + MARK_GEN._config = config + 
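# deselect_by_keyword above rewrites the -k expression before matching: a
# leading "-" negates it, and a trailing ":" means "select everything from
# the first match on". That preprocessing in isolation (hypothetical helper):
def preprocess_keyword(expr):
    expr = expr.lstrip()
    if expr.startswith("-"):
        expr = "not " + expr[1:]
    selectuntil = False
    if expr[-1:] == ":":
        selectuntil = True
        expr = expr[:-1]
    return expr, selectuntil

assert preprocess_keyword("-slow") == ("not slow", False)
assert preprocess_keyword("test_login:") == ("test_login", True)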
+ empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + "{!s} must be one of skip, xfail or fail_at_collect" + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + ) + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/env_27/lib/python2.7/site-packages/_pytest/mark/evaluate.py b/env_27/lib/python2.7/site-packages/_pytest/mark/evaluate.py new file mode 100644 index 0000000..793bff7 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/mark/evaluate.py @@ -0,0 +1,125 @@ +import os +import platform +import sys +import traceback + +import six + +from ..outcomes import fail +from ..outcomes import TEST_OUTCOME + + +def cached_eval(config, expr, d): + if not hasattr(config, "_evalcache"): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + import _pytest._code + + exprcode = _pytest._code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +class MarkEvaluator(object): + def __init__(self, item, name): + self.item = item + self._marks = None + self._mark = None + self._mark_name = name + + def __bool__(self): + # dont cache here to prevent staleness + return bool(self._get_marks()) + + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, "exc") + + def _get_marks(self): + return list(self.item.iter_markers(name=self._mark_name)) + + def invalidraise(self, exc): + raises = self.get("raises") + if not raises: + return + return not isinstance(exc, raises) + + def istrue(self): + try: + return self._istrue() + except TEST_OUTCOME: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^"] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + fail( + "Error evaluating %r expression\n" + " %s\n" + "%s" % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False, + ) + + def _getglobals(self): + d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} + if hasattr(self.item, "obj"): + d.update(self.item.obj.__globals__) + return d + + def _istrue(self): + if hasattr(self, "result"): + return self.result + self._marks = self._get_marks() + + if self._marks: + self.result = False + for mark in self._marks: + self._mark = mark + if "condition" in mark.kwargs: + args = (mark.kwargs["condition"],) + else: + args = mark.args + + for expr in args: + self.expr = expr + if isinstance(expr, six.string_types): + d = self._getglobals() + result = cached_eval(self.item.config, expr, d) + else: + if "reason" not in mark.kwargs: + # XXX better be checked at collection time + msg = ( + "you need to specify reason=STRING " + "when using booleans as conditions." 
+ ) + fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = mark.kwargs.get("reason", None) + self.expr = expr + return self.result + + if not args: + self.result = True + self.reason = mark.kwargs.get("reason", None) + return self.result + return False + + def get(self, attr, default=None): + if self._mark is None: + return default + return self._mark.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, "reason", None) or self.get("reason", None) + if not expl: + if not hasattr(self, "expr"): + return "" + else: + return "condition: " + str(self.expr) + return expl diff --git a/env_27/lib/python2.7/site-packages/_pytest/mark/legacy.py b/env_27/lib/python2.7/site-packages/_pytest/mark/legacy.py new file mode 100644 index 0000000..f784ffa --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/mark/legacy.py @@ -0,0 +1,102 @@ +""" +this is a place where we put datastructures used by legacy apis +we hope ot remove +""" +import keyword + +import attr + +from _pytest.config import UsageError + + +@attr.s +class MarkMapping(object): + """Provides a local mapping for markers where item access + resolves to True if the marker is present. """ + + own_mark_names = attr.ib() + + @classmethod + def from_item(cls, item): + mark_names = {mark.name for mark in item.iter_markers()} + return cls(mark_names) + + def __getitem__(self, name): + return name in self.own_mark_names + + +class KeywordMapping(object): + """Provides a local mapping for keywords. + Given a list of names, map any substring of one of these names to True. + """ + + def __init__(self, names): + self._names = names + + @classmethod + def from_item(cls, item): + mapped_names = set() + + # Add the names of the current item and any parent items + import pytest + + for item in item.listchain(): + if not isinstance(item, pytest.Instance): + mapped_names.add(item.name) + + # Add the names added as extra keywords to current or parent items + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment + if hasattr(item, "function"): + mapped_names.update(item.function.__dict__) + + # add the markers to the keywords as we no longer handle them correctly + mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __getitem__(self, subname): + for name in self._names: + if subname in name: + return True + return False + + +python_keywords_allowed_list = ["or", "and", "not"] + + +def matchmark(colitem, markexpr): + """Tries to match on any marker names, attached to the given colitem.""" + try: + return eval(markexpr, {}, MarkMapping.from_item(colitem)) + except SyntaxError as e: + raise SyntaxError(str(e) + "\nMarker expression must be valid Python!") + + +def matchkeyword(colitem, keywordexpr): + """Tries to match given keyword expression to given collector item. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. 
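# matchkeyword below hands the -k expression to eval() with a mapping
# whose __getitem__ does substring matching, so bare names in the
# expression turn into True/False lookups (sketch of the same trick):
class SubstringMapping(object):
    def __init__(self, names):
        self._names = names

    def __getitem__(self, subname):
        return any(subname in name for name in self._names)

names = SubstringMapping(["test_login_ok", "test_logout"])
assert eval("login and not signup", {}, names)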
+ """ + mapping = KeywordMapping.from_item(colitem) + if " " not in keywordexpr: + # special case to allow for simple "-k pass" and "-k 1.3" + return mapping[keywordexpr] + elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: + return not mapping[keywordexpr[4:]] + for kwd in keywordexpr.split(): + if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: + raise UsageError( + "Python keyword '{}' not accepted in expressions passed to '-k'".format( + kwd + ) + ) + try: + return eval(keywordexpr, {}, mapping) + except SyntaxError: + raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/env_27/lib/python2.7/site-packages/_pytest/mark/structures.py b/env_27/lib/python2.7/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000..0021dd5 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/mark/structures.py @@ -0,0 +1,378 @@ +import inspect +import warnings +from collections import namedtuple +from operator import attrgetter + +import attr +import six + +from ..compat import ascii_escaped +from ..compat import getfslineno +from ..compat import MappingMixin +from ..compat import NOTSET +from _pytest.outcomes import fail + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def alias(name, warning=None): + getter = attrgetter(name) + + def warned(self): + warnings.warn(warning, stacklevel=2) + return getter(self) + + return property(getter if warning is None else warned, doc="alias for " + name) + + +def istestfunc(func): + return ( + hasattr(func, "__call__") + and getattr(func, "__name__", "") != "" + ) + + +def get_empty_parameterset_mark(config, argnames, func): + from ..nodes import Collector + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + return mark(reason=reason) + + +class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop("marks", ()) + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, (tuple, list, set)) + + id_ = kw.pop("id", None) + if id_ is not None: + if not isinstance(id_, six.string_types): + raise TypeError( + "Expected id to be a string, got {}: {!r}".format(type(id_), id_) + ) + id_ = ascii_escaped(id_) + return cls(values, marks, id_) + + @classmethod + def extract_from(cls, parameterset, force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + """ + + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + return cls(parameterset, marks=[], id=None) + + @classmethod + def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in 
argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + del argvalues + + if parameters: + # check all parameter sets have the correct number of values + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=function_definition.nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # empty parameter set (likely computed at runtime): create a single + # parameter set with NOSET values, with the "empty parameter set" mark applied to it + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@attr.s(frozen=True) +class Mark(object): + #: name of the mark + name = attr.ib(type=str) + #: positional arguments of the mark decorator + args = attr.ib() # List[object] + #: keyword arguments of the mark decorator + kwargs = attr.ib() # Dict[str, object] + + def combined_with(self, other): + """ + :param other: the mark to combine with + :type other: Mark + :rtype: Mark + + combines by appending aargs and merging the mappings + """ + assert self.name == other.name + return Mark( + self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) + ) + + +@attr.s +class MarkDecorator(object): + """ A decorator for test functions and test classes. When applied + it will create :class:`MarkInfo` objects which may be + :ref:`retrieved by hooks as item keywords `. + MarkDecorator instances are often created like this:: + + mark1 = pytest.mark.NAME # simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a MarkDecorator instance is called it does the following: + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches itself to the class so it + gets applied automatically to all test cases found in that class. + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches a MarkInfo object to the + function, containing all the arguments already stored internally in + the MarkDecorator. + 3. When called in any other case, it performs a 'fake construction' call, + i.e. it returns a new MarkDecorator instance with the original + MarkDecorator's content updated with the arguments passed to this + call. + + Note: The rules above prevent MarkDecorator objects from storing only a + single function or class reference as their positional argument with no + additional keyword or positional arguments. 
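# Mark.combined_with above concatenates the positional args and merges the
# keyword args, with the right-hand mark winning on collisions (sketch
# using the Mark class defined in this file):
from _pytest.mark.structures import Mark

m1 = Mark("slow", (1,), {"reason": "io", "timeout": 10})
m2 = Mark("slow", (2,), {"timeout": 30})
merged = m1.combined_with(m2)
assert merged.args == (1, 2)
assert merged.kwargs == {"reason": "io", "timeout": 30}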
+ + """ + + mark = attr.ib(validator=attr.validators.instance_of(Mark)) + + name = alias("mark.name") + args = alias("mark.args") + kwargs = alias("mark.kwargs") + + @property + def markname(self): + return self.name # for backward-compat (2.4.1 had this attr) + + def __eq__(self, other): + return self.mark == other.mark if isinstance(other, MarkDecorator) else False + + def __repr__(self): + return "" % (self.mark,) + + def with_args(self, *args, **kwargs): + """ return a MarkDecorator with extra arguments added + + unlike call this can be used even if the sole argument is a callable/class + + :return: MarkDecorator + """ + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) + + def __call__(self, *args, **kwargs): + """ if passed a single callable argument: decorate it with mark info. + otherwise add *args/**kwargs in-place to mark information. """ + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks(obj): + """ + obtain the unpacked marks that are stored on an object + """ + mark_list = getattr(obj, "pytestmark", []) + if not isinstance(mark_list, list): + mark_list = [mark_list] + return normalize_mark_list(mark_list) + + +def normalize_mark_list(mark_list): + """ + normalizes marker decorating helpers to mark objects + + :type mark_list: List[Union[Mark, Markdecorator]] + :rtype: List[Mark] + """ + extracted = [ + getattr(mark, "mark", mark) for mark in mark_list + ] # unpack MarkDecorator + for mark in extracted: + if not isinstance(mark, Mark): + raise TypeError("got {!r} instead of Mark".format(mark)) + return [x for x in extracted if isinstance(x, Mark)] + + +def store_mark(obj, mark): + """store a Mark on an object + this is used to implement the Mark declarations/decorators correctly + """ + assert isinstance(mark, Mark), mark + # always reassign name to avoid updating pytestmark + # in a reference that was only borrowed + obj.pytestmark = get_unpacked_marks(obj) + [mark] + + +class MarkGenerator(object): + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. Example:: + + import pytest + @pytest.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. 
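# store_mark above appends to the function's `pytestmark` attribute by
# reassigning it, which is what the @pytest.mark.slowtest example above
# compiles down to (sketch, assuming pytest is importable):
import pytest

@pytest.mark.slowtest
def test_function():
    pass

assert [m.name for m in test_function.pytestmark] == ["slowtest"]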
""" + + _config = None + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + if self._config is not None: + self._check(name) + return MarkDecorator(Mark(name, (), {})) + + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = values = set() + for line in self._config.getini("markers"): + marker = line.split(":", 1)[0] + marker = marker.rstrip() + x = marker.split("(", 1)[0] + values.add(x) + if name not in self._markers: + fail("{!r} not a registered marker".format(name), pytrace=False) + + +MARK_GEN = MarkGenerator() + + +class NodeKeywords(MappingMixin): + def __init__(self, node): + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key): + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key, value): + self._markers[key] = value + + def __delitem__(self, key): + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self): + seen = self._seen() + return iter(seen) + + def _seen(self): + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return seen + + def __len__(self): + return len(self._seen()) + + def __repr__(self): + return "" % (self.node,) + + +@attr.s(cmp=False, hash=False) +class NodeMarkers(object): + """ + internal structure for storing marks belonging to a node + + ..warning:: + + unstable api + + """ + + own_markers = attr.ib(default=attr.Factory(list)) + + def update(self, add_markers): + """update the own markers + """ + self.own_markers.extend(add_markers) + + def find(self, name): + """ + find markers in own nodes or parent nodes + needs a better place + """ + for mark in self.own_markers: + if mark.name == name: + yield mark + + def __iter__(self): + return iter(self.own_markers) diff --git a/env_27/lib/python2.7/site-packages/_pytest/monkeypatch.py b/env_27/lib/python2.7/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000..f6c1346 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,323 @@ +""" monkeypatching and mocking functionality. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import sys +import warnings +from contextlib import contextmanager + +import six + +import pytest +from _pytest.fixtures import fixture +from _pytest.pathlib import Path + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +@fixture +def monkeypatch(): + """The returned ``monkeypatch`` fixture provides these + helper methods to modify objects, dictionaries or os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting + test function or fixture has finished. The ``raising`` + parameter determines if a KeyError or AttributeError + will be raised if the set/deletion operation has no target. 
+    """
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
+
+
+def resolve(name):
+    # simplified from zope.dottedname
+    parts = name.split(".")
+
+    used = parts.pop(0)
+    found = __import__(used)
+    for part in parts:
+        used += "." + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # we use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions on python 3
+        try:
+            __import__(used)
+        except ImportError as ex:
+            # str is used for py2 vs py3
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError("import error in %s: %s" % (used, ex))
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj, name, ann):
+    try:
+        obj = getattr(obj, name)
+    except AttributeError:
+        raise AttributeError(
+            "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name)
+        )
+    return obj
+
+
+def derive_importpath(import_path, raising):
+    if not isinstance(import_path, six.string_types) or "." not in import_path:
+        raise TypeError("must be absolute import path string, not %r" % (import_path,))
+    module, attr = import_path.rsplit(".", 1)
+    target = resolve(module)
+    if raising:
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset(object):
+    def __repr__(self):
+        return "<notset>"
+
+
+notset = Notset()
+
+
+class MonkeyPatch(object):
+    """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
+    """
+
+    def __init__(self):
+        self._setattr = []
+        self._setitem = []
+        self._cwd = None
+        self._savesyspath = None
+
+    @contextmanager
+    def context(self):
+        """
+        Context manager that returns a new :class:`MonkeyPatch` object which
+        undoes any patching done inside the ``with`` block upon exit:
+
+        .. code-block:: python
+
+            import functools
+            def test_partial(monkeypatch):
+                with monkeypatch.context() as m:
+                    m.setattr(functools, "partial", 3)
+
+        Useful in situations where it is desired to undo some patches before the test ends,
+        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
+        of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_.
+        """
+        m = MonkeyPatch()
+        try:
+            yield m
+        finally:
+            m.undo()
+
+    def setattr(self, target, name, value=notset, raising=True):
+        """ Set attribute value on target, memorizing the old value.
+        By default raise AttributeError if the attribute did not exist.
+
+        For convenience you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name. Example:
+        ``monkeypatch.setattr("os.getcwd", lambda: "/")``
+        would set the ``getcwd`` function of the ``os`` module.
+
+        The ``raising`` value determines if the setattr should fail
+        if the attribute is not already present (defaults to True
+        which means it will raise).
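+
+        Another illustrative sketch, patching an attribute on an already
+        imported module object::
+
+            import sys
+            monkeypatch.setattr(sys, "platform", "linux")
+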
+ """ + __tracebackhide__ = True + import inspect + + if value is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError("%r has no attribute %r" % (target, name)) + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr(self, target, name=notset, raising=True): + """ Delete attribute ``name`` from ``target``, by default raise + AttributeError it the attribute did not previously exist. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + If ``raising`` is set to False, no exception will be raised if the + attribute is missing. + """ + __tracebackhide__ = True + import inspect + + if name is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic, name, value): + """ Set dictionary entry ``name`` to value. """ + self._setitem.append((dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic, name, raising=True): + """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. + + If ``raising`` is set to False, no exception will be raised if the + key is missing. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + del dic[name] + + def _warn_if_env_name_is_not_str(self, name): + """On Python 2, warn if the given environment variable name is not a native str (#4056)""" + if six.PY2 and not isinstance(name, str): + warnings.warn( + pytest.PytestWarning( + "Environment variable name {!r} should be str".format(name) + ) + ) + + def setenv(self, name, value, prepend=None): + """ Set environment variable ``name`` to ``value``. If ``prepend`` + is a character, read the current environment variable value + and prepend the ``value`` adjoined with the ``prepend`` character.""" + if not isinstance(value, str): + warnings.warn( + pytest.PytestWarning( + "Value of environment variable {name} type should be str, but got " + "{value!r} (type: {type}); converted to str implicitly".format( + name=name, value=value, type=type(value).__name__ + ) + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self._warn_if_env_name_is_not_str(name) + self.setitem(os.environ, name, value) + + def delenv(self, name, raising=True): + """ Delete ``name`` from the environment. Raise KeyError if it does + not exist. + + If ``raising`` is set to False, no exception will be raised if the + environment variable is missing. 
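+
+        Example (illustrative)::
+
+            monkeypatch.delenv("PYTEST_ADDOPTS", raising=False)
+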
+ """ + self._warn_if_env_name_is_not_str(name) + self.delitem(os.environ, name, raising=raising) + + def syspath_prepend(self, path): + """ Prepend ``path`` to ``sys.path`` list of import locations. """ + from pkg_resources import fixup_namespace_packages + + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + fixup_namespace_packages(str(path)) + + def chdir(self, path): + """ Change the current working directory to the specified path. + Path can be a string or a py.path.local object. + """ + if self._cwd is None: + self._cwd = os.getcwd() + if hasattr(path, "chdir"): + path.chdir() + elif isinstance(path, Path): + # modern python uses the fspath protocol here LEGACY + os.chdir(str(path)) + else: + os.chdir(path) + + def undo(self): + """ Undo previous changes. This call consumes the + undo stack. Calling it a second time has no effect unless + you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + Note that the same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, name, value in reversed(self._setitem): + if value is notset: + try: + del dictionary[name] + except KeyError: + pass # was already deleted, so we have the desired state + else: + dictionary[name] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/env_27/lib/python2.7/site-packages/_pytest/nodes.py b/env_27/lib/python2.7/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..fcaf593 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/nodes.py @@ -0,0 +1,428 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import warnings + +import py +import six + +import _pytest._code +from _pytest.compat import getfslineno +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail + +SEP = "/" + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + + +def _splitnode(nodeid): + """Split a nodeid into constituent 'parts'. + + Node IDs are strings, and can be things like: + '' + 'testing/code' + 'testing/code/test_excinfo.py' + 'testing/code/test_excinfo.py::TestFormattedExcinfo' + + Return values are lists e.g. + [] + ['testing', 'code'] + ['testing', 'code', 'test_excinfo.py'] + ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] + """ + if nodeid == "": + # If there is no root node at all, return an empty list so the caller's logic can remain sane + return [] + parts = nodeid.split(SEP) + # Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar' + parts[-1:] = parts[-1].split("::") + return parts + + +def ischildnode(baseid, nodeid): + """Return True if the nodeid is a child node of the baseid. + + E.g. 
'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' + """ + base_parts = _splitnode(baseid) + node_parts = _splitnode(nodeid) + if len(node_parts) < len(base_parts): + return False + return node_parts[: len(base_parts)] == base_parts + + +class Node(object): + """ base class for Collector and Item the test collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__( + self, name, parent=None, config=None, session=None, fspath=None, nodeid=None + ): + #: a unique name within the scope of the parent node + self.name = name + + #: the parent collector node. + self.parent = parent + + #: the pytest config object + self.config = config or parent.config + + #: the session this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from (can be None) + self.fspath = fspath or getattr(parent, "fspath", None) + + #: keywords/markers collected from all scopes + self.keywords = NodeKeywords(self) + + #: the marker objects belonging to this node + self.own_markers = [] + + #: allow adding of extra keywords to use for matching + self.extra_keyword_matches = set() + + # used for storing artificial fixturedefs for direct parametrization + self._name2pseudofixturedef = {} + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + self._nodeid = self.parent.nodeid + if self.name != "()": + self._nodeid += "::" + self.name + + @property + def ihook(self): + """ fspath sensitive hook proxy used to call pytest hooks""" + return self.session.gethookproxy(self.fspath) + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning): + """Issue a warning for this item. + + Warnings will be displayed after the test session, unless explicitly suppressed + + :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. + + :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + + """ + from _pytest.warning_types import PytestWarning + + if not isinstance(warning, PytestWarning): + raise ValueError( + "warning must be an instance of PytestWarning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1 if lineno is not None else None, + ) + + # methods for ordering nodes + @property + def nodeid(self): + """ a ::-separated string denoting its collection tree address. """ + return self._nodeid + + def __hash__(self): + return hash(self.nodeid) + + def setup(self): + pass + + def teardown(self): + pass + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + chain = [] + item = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker, append=True): + """dynamically add a marker object to the node. + + :type marker: ``str`` or ``pytest.mark.*`` object + :param marker: + ``append=True`` whether to append the marker, + if ``False`` insert at position ``0``. 
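+
+        Example (an illustrative sketch)::
+
+            item.add_marker("slow")
+            item.add_marker(pytest.mark.xfail, append=False)
+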
+ """ + from _pytest.mark import MarkDecorator, MARK_GEN + + if isinstance(marker, six.string_types): + marker = getattr(MARK_GEN, marker) + elif not isinstance(marker, MarkDecorator): + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker.name] = marker + if append: + self.own_markers.append(marker.mark) + else: + self.own_markers.insert(0, marker.mark) + + def iter_markers(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + returns sequence of tuples (node, mark) + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + def get_closest_marker(self, name, default=None): + """return the first marker matching the name, from closest (for example function) to farther level (for example + module level). + + :param default: fallback return value of no marker was found + :param name: name to filter by + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self): + """ Return a set of all extra keywords in self and any parents.""" + extra_keywords = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self): + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin): + """ register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls): + """ get the next parent node (including ourself) + which is an instance of the given class""" + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + if excinfo.errisinstance(fail.Exception): + if not excinfo.value.pytrace: + return six.text_type(excinfo.value) + fm = self.session._fixturemanager + if excinfo.errisinstance(fm.FixtureLookupError): + return excinfo.value.formatrepr() + tbfilter = True + if self.config.option.fulltrace: + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + tbfilter = False # prunetraceback already does it + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? 
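+        # no explicit style was requested and we are not in fulltrace mode:
+        # fall back to the --tb command line option ("short" or the default "long")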
+ if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + + if self.config.option.verbose > 1: + truncate_locals = False + else: + truncate_locals = True + + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + ) + + repr_failure = _repr_failure_py + + +def get_fslocation_from_item(item): + """Tries to extract the actual location from an item, depending on available attributes: + + * "fslocation": a pair (path, lineno) + * "obj": a Python object that the item wraps. + * "fspath": just a path + + :rtype: a tuple of (str|LocalPath, int) with filename and line number. + """ + result = getattr(item, "location", None) + if result is not None: + return result[:2] + obj = getattr(item, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(item, "fspath", "unknown location"), -1 + + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + + class CollectError(Exception): + """ an error during collection, contains a custom message. """ + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (None._repr_failure_py defaults to "long" without "fulltrace" option). + tbstyle = self.config.getoption("tbstyle") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _prunetraceback(self, excinfo): + if hasattr(self, "fspath"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session, fspath): + for initial_path in session._initialpaths: + if fspath.common(initial_path) == initial_path: + return fspath.relto(initial_path) + + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, SEP) + self.fspath = fspath + + session = session or parent.session + + if nodeid is None: + nodeid = self.fspath.relto(session.config.rootdir) + + if not nodeid: + nodeid = _check_initialpaths_for_relpath(session, fspath) + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super(FSCollector, self).__init__( + name, parent, config, session, nodeid=nodeid, fspath=fspath + ) + + +class File(FSCollector): + """ base class for collecting tests from a file. """ + + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. 
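+
+    For example (illustrative), a function decorated with
+    ``@pytest.mark.parametrize`` produces one item per parameter set.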
+ """ + + nextitem = None + + def __init__(self, name, parent=None, config=None, session=None, nodeid=None): + super(Item, self).__init__(name, parent, config, session, nodeid=nodeid) + self._report_sections = [] + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties for this test. + self.user_properties = [] + + def add_report_section(self, when, key, content): + """ + Adds a new report section, similar to what's done internally to add stdout and + stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + fspath = self.session._node_location_to_relpath(location[0]) + location = (fspath, location[1], str(location[2])) + self._location = location + return location diff --git a/env_27/lib/python2.7/site-packages/_pytest/nose.py b/env_27/lib/python2.7/site-packages/_pytest/nose.py new file mode 100644 index 0000000..13dda68 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/nose.py @@ -0,0 +1,68 @@ +""" run test suites written for nose. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +import six + +from _pytest import python +from _pytest import runner +from _pytest import unittest +from _pytest.config import hookimpl + + +def get_skip_exceptions(): + skip_classes = set() + for module_name in ("unittest", "unittest2", "nose"): + mod = sys.modules.get(module_name) + if hasattr(mod, "SkipTest"): + skip_classes.add(mod.SkipTest) + return tuple(skip_classes) + + +def pytest_runtest_makereport(item, call): + if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): + # let's substitute the excinfo with a pytest.skip one + call2 = runner.CallInfo.from_call( + lambda: runner.skip(six.text_type(call.excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "setup"): + # call module level setup if there is no object level one + call_optional(item.parent.obj, "setup") + # XXX this implies we only call teardown when setup worked + item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + + +def teardown_nose(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "teardown"): + call_optional(item.parent.obj, "teardown") + # if hasattr(item.parent, '_nosegensetup'): + # #call_optional(item._nosegensetup, 'teardown') + # del item.parent._nosegensetup + + +def is_potential_nosetest(item): + # extra check needed since we do not do nose style setup/teardown + # on direct unittest style classes + return isinstance(item, python.Function) and not isinstance( + item, unittest.TestCaseFunction + ) + + +def call_optional(obj, name): + method = getattr(obj, name, None) + isfixture = hasattr(method, "_pytestfixturefunction") + if method is not None and not isfixture and callable(method): + # If there's any problems allow the exception 
to raise rather than + # silently ignoring them + method() + return True diff --git a/env_27/lib/python2.7/site-packages/_pytest/outcomes.py b/env_27/lib/python2.7/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..6c2dfb5 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/outcomes.py @@ -0,0 +1,193 @@ +""" +exception classes and constants handling test outcomes +as well as functions creating them +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + + +class OutcomeException(BaseException): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. + """ + + def __init__(self, msg=None, pytrace=True): + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = val.decode("UTF-8", errors="replace") + return val + return "<%s instance>" % (self.__class__.__name__,) + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + + __module__ = "builtins" + + +class Exit(Exception): + """ raised for immediate program exits (no tracebacks/summaries)""" + + def __init__(self, msg="unknown reason", returncode=None): + self.msg = msg + self.returncode = returncode + super(Exit, self).__init__(msg) + + +# exposed helper methods + + +def exit(msg, returncode=None): + """ + Exit testing process. + + :param str msg: message to display upon exit. + :param int returncode: return code to be used when exiting pytest. + """ + __tracebackhide__ = True + raise Exit(msg, returncode) + + +exit.Exception = Exit + + +def skip(msg="", **kwargs): + """ + Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :kwarg bool allow_module_level: allows this function to be called at + module level, skipping the rest of the module. Default to False. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP + `_) + to skip a doctest statically. + """ + __tracebackhide__ = True + allow_module_level = kwargs.pop("allow_module_level", False) + if kwargs: + keys = [k for k in kwargs.keys()] + raise TypeError("unexpected keyword arguments: {}".format(keys)) + raise Skipped(msg=msg, allow_module_level=allow_module_level) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ + Explicitly fail an executing test with the given message. + + :param str msg: the message to show the user as reason for the failure. + :param bool pytrace: if false the msg represents the full failure information and no + python traceback will be reported. 
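+
+    Example (illustrative; ``result`` is a hypothetical value)::
+
+        if result is None:
+            fail("computation returned no result", pytrace=False)
+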
+ """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +class XFailed(fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ + Imperatively xfail an executing test or setup functions with the given reason. + + This function should be called only during testing (setup, call or teardown). + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be + xfailed under certain conditions like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +def importorskip(modname, minversion=None, reason=None): + """Imports and returns the requested module ``modname``, or skip the current test + if the module cannot be imported. + + :param str modname: the name of the module to import + :param str minversion: if given, the imported module ``__version__`` attribute must be + at least this minimal version, otherwise the test is still skipped. + :param str reason: if given, this reason is shown as the message when the module + cannot be imported. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + if reason is None: + reason = "could not import %r" % (modname,) + raise Skipped(reason, allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped( + "we have a required version for %r but can not import " + "pkg_resources to parse version strings." % (modname,), + allow_module_level=True, + ) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/env_27/lib/python2.7/site-packages/_pytest/pastebin.py b/env_27/lib/python2.7/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..9559e32 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/pastebin.py @@ -0,0 +1,113 @@ +""" submit failure or test session information to a pastebin service. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import tempfile + +import six + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # if no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a slave node + # when using pytest-xdist, for example + if tr is not None: + # pastebin file will be utf-8 encoded binary file + config._pastebinfile = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, six.text_type): + s = s.encode("utf-8") + config._pastebinfile.write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config): + if hasattr(config, "_pastebinfile"): + # get terminal contents and delete file + config._pastebinfile.seek(0) + sessionlog = config._pastebinfile.read() + config._pastebinfile.close() + del config._pastebinfile + # undo our patching in the terminal reporter + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # write summary + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents): + """ + Creates a new paste using bpaste.net service. 
+ + :contents: paste contents as utf-8 encoded bytes + :returns: url to the pasted contents + """ + import re + + if sys.version_info < (3, 0): + from urllib import urlopen, urlencode + else: + from urllib.request import urlopen + from urllib.parse import urlencode + + params = { + "code": contents, + "lexer": "python3" if sys.version_info[0] == 3 else "python", + "expiry": "1week", + } + url = "https://bpaste.net" + response = urlopen(url, data=urlencode(params).encode("ascii")).read() + m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) + if m: + return "%s/show/%s" % (url, m.group(1)) + else: + return "bad response: " + response + + +def pytest_terminal_summary(terminalreporter): + import _pytest.config + + if terminalreporter.config.option.pastebin != "failed": + return + tr = terminalreporter + if "failed" in tr.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats.get("failed"): + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = tr._getfailureheadline(rep) + tw = _pytest.config.create_terminal_writer( + terminalreporter.config, stringio=True + ) + rep.toterminal(tw) + s = tw.stringio.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/env_27/lib/python2.7/site-packages/_pytest/pathlib.py b/env_27/lib/python2.7/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..5dd00e7 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/pathlib.py @@ -0,0 +1,319 @@ +import atexit +import errno +import fnmatch +import itertools +import operator +import os +import shutil +import sys +import uuid +from functools import reduce +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from posixpath import sep as posix_sep + +import six +from six.moves import map + +from .compat import PY36 + +if PY36: + from pathlib import Path, PurePath +else: + from pathlib2 import Path, PurePath + +__all__ = ["Path", "PurePath"] + + +LOCK_TIMEOUT = 60 * 60 * 3 + +get_lock_path = operator.methodcaller("joinpath", ".lock") + + +def ensure_reset_dir(path): + """ + ensures the given path is an empty directory + """ + if path.exists(): + rmtree(path, force=True) + path.mkdir() + + +def rmtree(path, force=False): + if force: + # NOTE: ignore_errors might leave dead folders around. + # Python needs a rm -rf as a followup. 
+ shutil.rmtree(str(path), ignore_errors=True) + else: + shutil.rmtree(str(path)) + + +def find_prefixed(root, prefix): + """finds all elements in root that begin with the prefix, case insensitive""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter, prefix): + """ + :param iter: iterator over path names + :param prefix: expected prefix of the path names + :returns: the parts of the paths following the prefix + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root, prefix): + """combines find_prefixes and extract_suffixes + """ + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num): + """parses number path suffixes, returns -1 on error""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +if six.PY2: + + def _max(iterable, default): + """needed due to python2.7 lacking the default argument for max""" + return reduce(max, iterable, default) + + +else: + _max = max + + +def _force_symlink(root, target, link_to): + """helper to create the current symlink + + it's full of race conditions that are reasonably ok to ignore + for the context of best effort linking to the latest testrun + + the presumption being thatin case of much parallelism + the inaccuracy is going to be acceptable + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root, prefix): + """create a directory with an increased number as suffix for the given prefix""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath("{}{}".format(prefix, new_number)) + try: + new_path.mkdir() + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise EnvironmentError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p): + """crates a lock to prevent premature folder cleanup""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except OSError as e: + if e.errno == errno.EEXIST: + six.raise_from( + EnvironmentError("cannot create lockfile in {path}".format(path=p)), e + ) + else: + raise + else: + pid = os.getpid() + spid = str(pid) + if not isinstance(spid, bytes): + spid = spid.encode("ascii") + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise EnvironmentError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path, register=atexit.register): + """registers a cleanup function for removing a lock, by default on atexit""" + pid = os.getpid() + + def cleanup_on_exit(lock_path=lock_path, original_pid=pid): + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except (OSError, IOError): + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path): + """removes a numbered directory if its lock can be obtained and it does not seem to be in use""" + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = 
parent.joinpath("garbage-{}".format(uuid.uuid4())) + path.rename(garbage) + rmtree(garbage, force=True) + except (OSError, EnvironmentError): + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # if we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir + if lock_path is not None: + try: + lock_path.unlink() + except (OSError, IOError): + pass + + +def ensure_deletable(path, consider_lock_dead_if_created_before): + """checks if a lock exists and breaks it if its considered dead""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + if not lock.exists(): + return True + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + lock.unlink() + return True + else: + return False + + +def try_cleanup(path, consider_lock_dead_if_created_before): + """tries to cleanup a folder if we can ensure it's deletable""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root, prefix, keep): + """lists candidates for numbered directories to be removed - follows py.path""" + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + +def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before): + """cleanup for lock driven numbered directories""" + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + +def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout): + """creates a numbered dir with a cleanup lock and removes old ones""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix) + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + cleanup_numbered_dir( + root=root, + prefix=prefix, + keep=keep, + consider_lock_dead_if_created_before=consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input, root): + assert not isinstance(input, Path), "would break on py2" + root = Path(root) + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return root.joinpath(input) + + +def fnmatch_ex(pattern, path): + """FNMatcher port from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions + for each part of the path, while this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with + PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according + this logic. 
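+
+    A quick sketch of the difference (illustrative)::
+
+        fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")
+        # -> True, whereas the equivalent PurePath(...).match(...) returns False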
+ + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = six.text_type(path) + return fnmatch.fnmatch(name, pattern) + + +def parts(s): + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} diff --git a/env_27/lib/python2.7/site-packages/_pytest/pytester.py b/env_27/lib/python2.7/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..7fc5eaf --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/pytester.py @@ -0,0 +1,1376 @@ +"""(disabled by default) support for testing pytest and pytest plugins.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import gc +import os +import platform +import re +import subprocess +import sys +import time +import traceback +from fnmatch import fnmatch +from weakref import WeakKeyDictionary + +import py +import six + +import pytest +from _pytest._code import Source +from _pytest._io.saferepr import saferepr +from _pytest.assertion.rewrite import AssertionRewritingHook +from _pytest.capture import MultiCapture +from _pytest.capture import SysCapture +from _pytest.compat import safe_str +from _pytest.compat import Sequence +from _pytest.main import EXIT_INTERRUPTED +from _pytest.main import EXIT_OK +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.pathlib import Path + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + u"/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser): + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="directory to take the pytester example files from" + ) + + +def pytest_configure(config): + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +def raise_on_kwargs(kwargs): + if kwargs: + raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs)))) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + # py3: use subprocess.DEVNULL directly. 
+        with open(os.devnull, "wb") as devnull:
+            return subprocess.check_output(
+                ("lsof", "-Ffn0", "-p", str(pid)), stderr=devnull
+            ).decode()
+
+    def _parse_lsof_output(self, out):
+        def isopen(line):
+            return line.startswith("f") and (
+                "deleted" not in line
+                and "mem" not in line
+                and "txt" not in line
+                and "cwd" not in line
+            )
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split("\0")
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename in IGNORE_PAM:
+                    continue
+                if filename.startswith("/"):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self):
+        try:
+            subprocess.check_output(("lsof", "-v"))
+        except (OSError, subprocess.CalledProcessError):
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_protocol(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            error.append("See issue #2366")
+            item.warn(pytest.PytestWarning("\n".join(error)))
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+    "python2.7": r"C:\Python27\python.exe",
+    "python3.4": r"C:\Python34\python.exe",
+    "python3.5": r"C:\Python35\python.exe",
+    "python3.6": r"C:\Python36\python.exe",
+}
+
+
+# used at least by pytest-xdist plugin
+
+
+@pytest.fixture
+def _pytest(request):
+    """Return a helper which offers a gethookrecorder(hook) method which
+    returns a HookRecorder instance which helps to make assertions about called
+    hooks.
+
+    """
+    return PytestArg(request)
+
+
+class PytestArg(object):
+    def __init__(self, request):
+        self.request = request
+
+    def gethookrecorder(self, hook):
+        hookrecorder = HookRecorder(hook._pm)
+        self.request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(values):
+    """Only return names from iterator values without a leading underscore."""
+    return [x for x in values if x[0] != "_"]
+
+
+class ParsedCall(object):
+    def __init__(self, name, kwargs):
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self):
+        d = self.__dict__.copy()
+        del d["_name"]
+        return "<ParsedCall %r(**%r)>" % (self._name, d)
+
+
+class HookRecorder(object):
+    """Record all hooks called in a plugin manager.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
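+
+    A short illustrative sketch (``pluginmanager`` stands for an existing
+    plugin manager instance)::
+
+        rec = HookRecorder(pluginmanager)
+        reports = rec.getreports("pytest_runtest_logreport")
+        rec.finish_recording()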
+ + """ + + def __init__(self, pluginmanager): + self._pluginmanager = pluginmanager + self.calls = [] + + def before(hook_name, hook_impls, kwargs): + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name, hook_impls, kwargs): + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self): + self._undo_wrapping() + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries): + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + pytest.fail("could not find %r check %r" % (name, check)) + + def popcall(self, name): + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = ["could not find call %r, in:" % (name,)] + lines.extend([" %s" % x for x in self.calls]) + pytest.fail("\n".join(lines)) + + def getcall(self, name): + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart="", + names="pytest_runtest_logreport pytest_collectreport", + when=None, + ): + """return a testreport whose dotted import path matches""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" 
+                % (inamepart,)
+            )
+        if len(values) > 1:
+            raise ValueError(
+                "found 2 or more testreports matching %r: %s" % (inamepart, values)
+            )
+        return values[0]
+
+    def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
+        return [rep for rep in self.getreports(names) if rep.failed]
+
+    def getfailedcollections(self):
+        return self.getfailures("pytest_collectreport")
+
+    def listoutcomes(self):
+        passed = []
+        skipped = []
+        failed = []
+        for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
+            if rep.passed:
+                if rep.when == "call":
+                    passed.append(rep)
+            elif rep.skipped:
+                skipped.append(rep)
+            elif rep.failed:
+                failed.append(rep)
+        return passed, skipped, failed
+
+    def countoutcomes(self):
+        return [len(x) for x in self.listoutcomes()]
+
+    def assertoutcome(self, passed=0, skipped=0, failed=0):
+        realpassed, realskipped, realfailed = self.listoutcomes()
+        assert passed == len(realpassed)
+        assert skipped == len(realskipped)
+        assert failed == len(realfailed)
+
+    def clear(self):
+        self.calls[:] = []
+
+
+@pytest.fixture
+def linecomp(request):
+    return LineComp()
+
+
+@pytest.fixture(name="LineMatcher")
+def LineMatcher_fixture(request):
+    return LineMatcher
+
+
+@pytest.fixture
+def testdir(request, tmpdir_factory):
+    return Testdir(request, tmpdir_factory)
+
+
+@pytest.fixture
+def _config_for_test():
+    from _pytest.config import get_config
+
+    config = get_config()
+    yield config
+    config._ensure_unconfigure()  # cleanup, e.g. capman closing tmpfiles.
+
+
+rex_outcome = re.compile(r"(\d+) ([\w-]+)")
+
+
+class RunResult(object):
+    """The result of running a command.
+
+    Attributes:
+
+    :ret: the return value
+    :outlines: list of lines captured from stdout
+    :errlines: list of lines captured from stderr
+    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
+       reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
+       method
+    :stderr: :py:class:`LineMatcher` of stderr
+    :duration: duration in seconds
+
+    """
+
+    def __init__(self, ret, outlines, errlines, duration):
+        self.ret = ret
+        self.outlines = outlines
+        self.errlines = errlines
+        self.stdout = LineMatcher(outlines)
+        self.stderr = LineMatcher(errlines)
+        self.duration = duration
+
+    def __repr__(self):
+        return (
+            "<RunResult ret=%s len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
+            % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
+        )
+
+    def parseoutcomes(self):
+        """Return a dictionary of outcomestring->num from parsing the terminal
+        output that the test process produced.
+
+        """
+        for line in reversed(self.outlines):
+            if "seconds" in line:
+                outcomes = rex_outcome.findall(line)
+                if outcomes:
+                    d = {}
+                    for num, cat in outcomes:
+                        d[cat] = int(num)
+                    return d
+        raise ValueError("Pytest terminal report not found")
+
+    def assert_outcomes(
+        self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0
+    ):
+        """Assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run.
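+
+        Example (illustrative)::
+
+            result = testdir.runpytest()
+            result.assert_outcomes(passed=2, skipped=1)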
+
+        """
+        d = self.parseoutcomes()
+        obtained = {
+            "passed": d.get("passed", 0),
+            "skipped": d.get("skipped", 0),
+            "failed": d.get("failed", 0),
+            "error": d.get("error", 0),
+            "xpassed": d.get("xpassed", 0),
+            "xfailed": d.get("xfailed", 0),
+        }
+        expected = {
+            "passed": passed,
+            "skipped": skipped,
+            "failed": failed,
+            "error": error,
+            "xpassed": xpassed,
+            "xfailed": xfailed,
+        }
+        assert obtained == expected
+
+
+class CwdSnapshot(object):
+    def __init__(self):
+        self.__saved = os.getcwd()
+
+    def restore(self):
+        os.chdir(self.__saved)
+
+
+class SysModulesSnapshot(object):
+    def __init__(self, preserve=None):
+        self.__preserve = preserve
+        self.__saved = dict(sys.modules)
+
+    def restore(self):
+        if self.__preserve:
+            self.__saved.update(
+                (k, m) for k, m in sys.modules.items() if self.__preserve(k)
+            )
+        sys.modules.clear()
+        sys.modules.update(self.__saved)
+
+
+class SysPathsSnapshot(object):
+    def __init__(self):
+        self.__saved = list(sys.path), list(sys.meta_path)
+
+    def restore(self):
+        sys.path[:], sys.meta_path[:] = self.__saved
+
+
+class Testdir(object):
+    """Temporary test directory with tools to test/run pytest itself.
+
+    This is based on the ``tmpdir`` fixture but provides a number of methods
+    which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
+    methods will use :py:attr:`tmpdir` as their current working directory.
+
+    Attributes:
+
+    :tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
+
+    :plugins: A list of plugins to use with :py:meth:`parseconfig` and
+       :py:meth:`runpytest`. Initially this is an empty list but plugins can
+       be added to the list. The type of items to add to the list depends on
+       the method using them so refer to them for details.
+
+    """
+
+    class TimeoutExpired(Exception):
+        pass
+
+    def __init__(self, request, tmpdir_factory):
+        self.request = request
+        self._mod_collections = WeakKeyDictionary()
+        name = request.function.__name__
+        self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
+        self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
+        os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot)
+        os.environ.pop("TOX_ENV_DIR", None)  # Ensure that it is not used for caching.
+        os.environ.pop("PYTEST_ADDOPTS", None)  # Do not use outer options.
+        self.plugins = []
+        self._cwd_snapshot = CwdSnapshot()
+        self._sys_path_snapshot = SysPathsSnapshot()
+        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
+        self.chdir()
+        self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess
+
+    def __repr__(self):
+        return "<Testdir %r>" % (self.tmpdir,)
+
+    def __str__(self):
+        return str(self.tmpdir)
+
+    def finalize(self):
+        """Clean up global state artifacts.
+
+        Some methods modify the global interpreter state and this tries to
+        clean this up. It does not remove the temporary directory however so
+        it can be looked at after the test run has finished.
+
+        """
+        self._sys_modules_snapshot.restore()
+        self._sys_path_snapshot.restore()
+        self._cwd_snapshot.restore()
+        os.environ.pop("PYTEST_DEBUG_TEMPROOT", None)
+
+    def __take_sys_modules_snapshot(self):
+        # some zope modules used by twisted-related tests keep internal state
+        # and can't be deleted; we had some trouble in the past with
+        # `zope.interface` for example
+        def preserve_module(name):
+            return name.startswith("zope")
+
+        return SysModulesSnapshot(preserve=preserve_module)
+
+    def make_hook_recorder(self, pluginmanager):
+        """Create a new :py:class:`HookRecorder` for a PluginManager."""
+        pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
+        self.request.addfinalizer(reprec.finish_recording)
+        return reprec
+
+    def chdir(self):
+        """Cd into the temporary directory.
+
+        This is done automatically upon instantiation.
+
+        """
+        self.tmpdir.chdir()
+
+    def _makefile(self, ext, args, kwargs, encoding="utf-8"):
+        items = list(kwargs.items())
+
+        def to_text(s):
+            return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
+
+        if args:
+            source = u"\n".join(to_text(x) for x in args)
+            basename = self.request.function.__name__
+            items.insert(0, (basename, source))
+
+        ret = None
+        for basename, value in items:
+            p = self.tmpdir.join(basename).new(ext=ext)
+            p.dirpath().ensure_dir()
+            source = Source(value)
+            source = u"\n".join(to_text(line) for line in source.lines)
+            p.write(source.strip().encode(encoding), "wb")
+            if ret is None:
+                ret = p
+        return ret
+
+    def makefile(self, ext, *args, **kwargs):
+        r"""Create new file(s) in the testdir.
+
+        :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
+        :param list[str] args: All args will be treated as strings and joined using newlines.
+           The result will be written as contents to the file. The name of the
+           file will be based on the test function requesting this fixture.
+        :param kwargs: Each keyword is the name of a file, while the value of it will
+           be written as contents of the file.
+
+        Examples:
+
+        .. code-block:: python
+
+            testdir.makefile(".txt", "line1", "line2")
+
+            testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
+
+        """
+        return self._makefile(ext, args, kwargs)
+
+    def makeconftest(self, source):
+        """Write a conftest.py file with 'source' as contents."""
+        return self.makepyfile(conftest=source)
+
+    def makeini(self, source):
+        """Write a tox.ini file with 'source' as contents."""
+        return self.makefile(".ini", tox=source)
+
+    def getinicfg(self, source):
+        """Return the pytest section from the tox.ini config file."""
+        p = self.makeini(source)
+        return py.iniconfig.IniConfig(p)["pytest"]
+
+    def makepyfile(self, *args, **kwargs):
+        """Shortcut for .makefile() with a .py extension."""
+        return self._makefile(".py", args, kwargs)
+
+    def maketxtfile(self, *args, **kwargs):
+        """Shortcut for .makefile() with a .txt extension."""
+        return self._makefile(".txt", args, kwargs)
+
+    def syspathinsert(self, path=None):
+        """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
+
+        This is undone automatically when this object dies at the end of each
+        test.
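+
+        Example (illustrative)::
+
+            testdir.makepyfile(helper="x = 1")
+            testdir.syspathinsert()  # now ``import helper`` works in-process
+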
+ """ + from pkg_resources import fixup_namespace_packages + + if path is None: + path = self.tmpdir + + dirname = str(path) + sys.path.insert(0, dirname) + fixup_namespace_packages(dirname) + + # a call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches + self._possibly_invalidate_import_caches() + + def _possibly_invalidate_import_caches(self): + # invalidate caches if we can (py33 and above) + try: + from importlib import invalidate_caches + except ImportError: + return + invalidate_caches() + + def mkdir(self, name): + """Create a new (sub)directory.""" + return self.tmpdir.mkdir(name) + + def mkpydir(self, name): + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a python package. + + """ + p = self.mkdir(name) + p.ensure("__init__.py") + return p + + def copy_example(self, name=None): + import warnings + from _pytest.warning_types import PYTESTER_COPY_EXAMPLE + + warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2) + example_dir = self.request.config.getini("pytester_example_dir") + if example_dir is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir = self.request.config.rootdir.join(example_dir) + + for extra_element in self.request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.join(*extra_element.args) + + if name is None: + func_name = self.request.function.__name__ + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.isdir(): + example_path = maybe_dir + elif maybe_file.isfile(): + example_path = maybe_file + else: + raise LookupError( + "{} cant be found as module or package in {}".format( + func_name, example_dir.bestrelpath(self.request.config.rootdir) + ) + ) + else: + example_path = example_dir.join(name) + + if example_path.isdir() and not example_path.join("__init__.py").isfile(): + example_path.copy(self.tmpdir) + return self.tmpdir + elif example_path.isfile(): + result = self.tmpdir.join(example_path.basename) + example_path.copy(result) + return result + else: + raise LookupError( + 'example "{}" is not found as a file or directory'.format(example_path) + ) + + Session = Session + + def getnode(self, config, arg): + """Return the collection node of a file. + + :param config: :py:class:`_pytest.config.Config` instance, see + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the + configuration + + :param arg: a :py:class:`py.path.local` instance of the file + + """ + session = Session(config) + assert "::" not in str(arg) + p = py.path.local(arg) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def getpathnode(self, path): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. 
+
+        :param path: a :py:class:`py.path.local` instance of the file
+
+        """
+        config = self.parseconfigure(path)
+        session = Session(config)
+        x = session.fspath.bestrelpath(path)
+        config.hook.pytest_sessionstart(session=session)
+        res = session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+        return res
+
+    def genitems(self, colitems):
+        """Generate all test items from a collection node.
+
+        This recurses into the collection node and returns a list of all the
+        test items contained within.
+
+        """
+        session = colitems[0].session
+        result = []
+        for colitem in colitems:
+            result.extend(session.genitems(colitem))
+        return result
+
+    def runitem(self, source):
+        """Run the "test_func" Item.
+
+        The calling test instance (class containing the test method) must
+        provide a ``.getrunner()`` method which should return a runner which
+        can run the test protocol for a single item, e.g.
+        :py:func:`_pytest.runner.runtestprotocol`.
+
+        """
+        # used from runner functional tests
+        item = self.getitem(source)
+        # the test class where we are called from wants to provide the runner
+        testclassinstance = self.request.instance
+        runner = testclassinstance.getrunner()
+        return runner(item)
+
+    def inline_runsource(self, source, *cmdlineargs):
+        """Run a test module in process using ``pytest.main()``.
+
+        This run writes "source" into a temporary file and runs
+        ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
+        for the result.
+
+        :param source: the source code of the test module
+
+        :param cmdlineargs: any extra command line arguments to use
+
+        :return: :py:class:`HookRecorder` instance of the result
+
+        """
+        p = self.makepyfile(source)
+        values = list(cmdlineargs) + [p]
+        return self.inline_run(*values)
+
+    def inline_genitems(self, *args):
+        """Run ``pytest.main(['--collectonly'])`` in-process.
+
+        Runs the :py:func:`pytest.main` function to run all of pytest inside
+        the test process itself like :py:meth:`inline_run`, but returns a
+        tuple of the collected items and a :py:class:`HookRecorder` instance.
+
+        """
+        rec = self.inline_run("--collect-only", *args)
+        items = [x.item for x in rec.getcalls("pytest_itemcollected")]
+        return items, rec
+
+    def inline_run(self, *args, **kwargs):
+        """Run ``pytest.main()`` in-process, returning a HookRecorder.
+
+        Runs the :py:func:`pytest.main` function to run all of pytest inside
+        the test process itself. This means it can return a
+        :py:class:`HookRecorder` instance which gives more detailed results
+        from that run than can be done by matching stdout/stderr from
+        :py:meth:`runpytest`.
+
+        :param args: command line arguments to pass to :py:func:`pytest.main`
+
+        :param plugins: (keyword-only) extra plugin instances the
+           ``pytest.main()`` instance should use
+
+        :return: a :py:class:`HookRecorder` instance
+
+        """
+        finalizers = []
+        try:
+            # Do not load user config.
+            monkeypatch = MonkeyPatch()
+            monkeypatch.setenv("HOME", str(self.tmpdir))
+            monkeypatch.setenv("USERPROFILE", str(self.tmpdir))
+            finalizers.append(monkeypatch.undo)
+
+            # When running pytest inline any plugins active in the main test
+            # process are already imported. So this disables the warning which
+            # will trigger to say they can no longer be rewritten, which is
+            # fine as they have already been rewritten.
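+            # (Concretely: AssertionRewritingHook._warn_already_imported would
+            # flag such already-loaded plugin modules; for an in-process run
+            # that warning is just noise, so it is silenced below and restored
+            # by a finalizer.)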
+ orig_warn = AssertionRewritingHook._warn_already_imported + + def revert_warn_already_imported(): + AssertionRewritingHook._warn_already_imported = orig_warn + + finalizers.append(revert_warn_already_imported) + AssertionRewritingHook._warn_already_imported = lambda *a: None + + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect(object): + def pytest_configure(x, config): + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins = kwargs.get("plugins") or [] + plugins.append(Collect()) + ret = pytest.main(list(args), plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec(object): + pass + + reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess(self, *args, **kwargs): + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. + + """ + if kwargs.get("syspathinsert"): + self.syspathinsert() + now = time.time() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + + class reprec(object): + ret = e.args[0] + + except Exception: + traceback.print_exc() + + class reprec(object): + ret = 3 + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) + res.reprec = reprec + return res + + def runpytest(self, *args, **kwargs): + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. + + """ + args = self._ensure_basetemp(args) + return self._runpytest_method(*args, **kwargs) + + def _ensure_basetemp(self, args): + args = list(args) + for x in args: + if safe_str(x).startswith("--basetemp"): + break + else: + args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) + return args + + def parseconfig(self, *args): + """Return a new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. 
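+
+        An illustrative sketch::
+
+            config = testdir.parseconfig("-q", "--tb=short")
+            assert config.option.tbstyle == "short"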
+ + """ + args = self._ensure_basetemp(args) + + import _pytest.config + + config = _pytest.config._prepareconfig(args, self.plugins) + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args): + """Return a new pytest configured Config instance. + + This returns a new :py:class:`_pytest.config.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. + + """ + config = self.parseconfig(*args) + config._do_configure() + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def getitem(self, source, funcname="test_func"): + """Return the test item for a test function. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: the module source + + :param funcname: the name of the test function for which to return a + test item + + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "%r item not found in module:\n%s\nitems: %s" % ( + funcname, + source, + items, + ) + + def getitems(self, source): + """Return all test items collected from the module. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning all test items contained within. + + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol(self, source, configargs=(), withinit=False): + """Return the module collection node for ``source``. + + This writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: the source code of the module to collect + + :param configargs: any extra arguments to pass to + :py:meth:`parseconfigure` + + :param withinit: whether to also write an ``__init__.py`` file to the + same directory to ensure it is a package + + """ + if isinstance(source, Path): + path = self.tmpdir.join(str(source)) + assert not withinit, "not supported for paths" + else: + kw = {self.request.function.__name__: Source(source).strip()} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol, name): + """Return the collection node for name from the module collection. + + This will search a module collection node for a collection node + matching the given name. + + :param modcol: a module collection node; see :py:meth:`getmodulecol` + + :param name: the name of the node to return + + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + + def popen(self, cmdargs, stdout, stderr, **kw): + """Invoke subprocess.Popen. + + This calls subprocess.Popen making sure the current working directory + is in the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. + + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + # Do not load user config. 
+ env["HOME"] = str(self.tmpdir) + env["USERPROFILE"] = env["HOME"] + kw["env"] = env + + popen = subprocess.Popen( + cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw + ) + popen.stdin.close() + + return popen + + def run(self, *cmdargs, **kwargs): + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and stderr. + + :param args: the sequence of arguments to pass to `subprocess.Popen()` + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. + + """ + __tracebackhide__ = True + + timeout = kwargs.pop("timeout", None) + raise_on_kwargs(kwargs) + + cmdargs = [ + str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs + ] + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") + print("running:", *cmdargs) + print(" in:", py.path.local()) + f1 = codecs.open(str(p1), "w", encoding="utf8") + f2 = codecs.open(str(p2), "w", encoding="utf8") + try: + now = time.time() + popen = self.popen( + cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") + ) + + def handle_timeout(): + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + elif six.PY3: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + else: + end = time.time() + timeout + + resolution = min(0.1, timeout / 10) + + while True: + ret = popen.poll() + if ret is not None: + break + + if time.time() > end: + handle_timeout() + + time.sleep(resolution) + finally: + f1.close() + f2.close() + f1 = codecs.open(str(p1), "r", encoding="utf8") + f2 = codecs.open(str(p2), "r", encoding="utf8") + try: + out = f1.read().splitlines() + err = f2.read().splitlines() + finally: + f1.close() + f2.close() + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + return RunResult(ret, out, err, time.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print("couldn't print to %s because of encoding" % (fp,)) + + def _getpytestargs(self): + return sys.executable, "-mpytest" + + def runpython(self, script): + """Run a python script using sys.executable as interpreter. + + Returns a :py:class:`RunResult`. + + """ + return self.run(sys.executable, script) + + def runpython_c(self, command): + """Run python -c "command", return a :py:class:`RunResult`.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess(self, *args, **kwargs): + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: the sequence of arguments to pass to the pytest subprocess + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. 
+ + """ + __tracebackhide__ = True + + p = py.path.local.make_numbered_dir( + prefix="runpytest-", keep=None, rootdir=self.tmpdir + ) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=kwargs.get("timeout")) + + def spawn_pytest(self, string, expect_timeout=10.0): + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + + """ + basetemp = self.tmpdir.mkdir("temp-pexpect") + invoke = " ".join(map(str, self._getpytestargs())) + cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd, expect_timeout=10.0): + """Run a command using pexpect. + + The pexpect child is returned. + + """ + pexpect = pytest.importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + pytest.skip("pypy-64 bit not supported") + if sys.platform.startswith("freebsd"): + pytest.xfail("pexpect does not work reliably on freebsd") + logfile = self.tmpdir.join("spawn.out").open("wb") + child = pexpect.spawn(cmd, logfile=logfile) + self.request.addfinalizer(logfile.close) + child.timeout = expect_timeout + return child + + +def getdecoded(out): + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (saferepr(out),) + + +class LineComp(object): + def __init__(self): + self.stringio = py.io.TextIO() + + def assert_contains_lines(self, lines2): + """Assert that lines2 are contained (linearly) in lines1. + + Return a list of extralines found. + + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + return LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher(object): + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + + """ + + def __init__(self, lines): + self.lines = lines + self._log_output = [] + + def str(self): + """Return the entire original text.""" + return "\n".join(self.lines) + + def _getlines(self, lines2): + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2): + """Check lines exist in the output using in any order. + + Lines are checked using ``fnmatch.fnmatch``. The argument is a list of + lines which have to occur in the output, in any order. + + """ + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2): + """Check lines exist in the output using ``re.match``, in any order. + + The argument is a list of lines which have to occur in the output, in + any order. + + """ + self._match_lines_random(lines2, lambda name, pat: re.match(pat, name)) + + def _match_lines_random(self, lines2, match_func): + """Check lines exist in the output. + + The argument is a list of lines which have to occur in the output, in + any order. Each line can contain glob whildcards. 
+ + """ + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + self._log("line %r not found in output" % line) + raise ValueError(self._log_text) + + def get_lines_after(self, fnline): + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args): + self._log_output.append(" ".join((str(x) for x in args))) + + @property + def _log_text(self): + return "\n".join(self._log_output) + + def fnmatch_lines(self, lines2): + """Search captured text for matching lines using ``fnmatch.fnmatch``. + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch") + + def re_match_lines(self, lines2): + """Search captured text for matching lines using ``re.match``. + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") + + def _match_lines(self, lines2, match_func, match_nickname): + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param list[str] lines2: list of string patterns to match. The actual + format depends on ``match_func`` + :param match_func: a callable ``match_func(line, pattern)`` where line + is the captured line from stdout/stderr and pattern is the matching + pattern + :param str match_nickname: the nickname for the match function that + will be logged to stdout when a match occurs + + """ + assert isinstance(lines2, Sequence) + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + nextline = None + extralines = [] + __tracebackhide__ = True + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + break + elif match_func(nextline, line): + self._log("%s:" % match_nickname, repr(line)) + self._log(" with:", repr(nextline)) + break + else: + if not nomatchprinted: + self._log("nomatch:", repr(line)) + nomatchprinted = True + self._log(" and:", repr(nextline)) + extralines.append(nextline) + else: + self._log("remains unmatched: %r" % (line,)) + pytest.fail(self._log_text) diff --git a/env_27/lib/python2.7/site-packages/_pytest/python.py b/env_27/lib/python2.7/site-packages/_pytest/python.py new file mode 100644 index 0000000..9e43922 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/python.py @@ -0,0 +1,1480 @@ +""" Python test discovery, setup and run of test functions. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import fnmatch +import inspect +import os +import sys +import warnings +from functools import partial +from textwrap import dedent + +import py +import six + +import _pytest +from _pytest import deprecated +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest.compat import ascii_escaped +from _pytest.compat import enum +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getfslineno +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import isclass +from _pytest.compat import isfunction +from _pytest.compat import NoneType +from _pytest.compat import NOTSET +from _pytest.compat import REGEX_TYPE +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import safe_str +from _pytest.compat import STRING_TYPES +from _pytest.config import hookimpl +from _pytest.main import FSHookProxy +from _pytest.mark import MARK_GEN +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import parts +from _pytest.warning_types import PytestWarning + + +def pyobj_property(name): + def get(self): + node = self.getparent(getattr(__import__("pytest"), name)) + if node is not None: + return node.obj + + doc = "python %s object this node was collected from (can be None)." % ( + name.lower(), + ) + return property(get, None, None, doc) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini( + "usefixtures", + type="args", + default=[], + help="list of default fixtures to be used with this project", + ) + parser.addini( + "python_files", + type="args", + default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="disable string escape non-ascii characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append"], + dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.", + ) + + +def pytest_cmdline_main(config): + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + + +def pytest_generate_tests(metafunc): + # those alternative 
spellings are common - raise a specific error to alert + # the user + alt_spellings = ["parameterize", "parametrise", "parameterise"] + for mark_name in alt_spellings: + if metafunc.definition.get_closest_marker(mark_name): + msg = "{0} has '{1}' mark, spelling should be 'parametrize'" + fail(msg.format(metafunc.function.__name__, mark_name), pytrace=False) + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/latest/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/latest/fixture.html#usefixtures ", + ) + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem): + testfunction = pyfuncitem.obj + iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None) + if iscoroutinefunction is not None and iscoroutinefunction(testfunction): + msg = "Coroutine functions are not natively supported and have been skipped.\n" + msg += "You need to install a suitable plugin for your async framework, for example:\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-trio\n" + msg += " - pytest-tornasync" + warnings.warn(PytestWarning(msg.format(pyfuncitem.nodeid))) + skip(msg="coroutine function and no async plugin installed (see warnings)") + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + testfunction(**testargs) + return True + + +def pytest_collect_file(path, parent): + ext = path.ext + if ext == ".py": + if not parent.session.isinitpath(path): + if not path_matches_patterns( + path, parent.config.getini("python_files") + ["__init__.py"] + ): + return + ihook = parent.session.gethookproxy(path) + return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + + +def path_matches_patterns(path, patterns): + """Returns True if the given py.path.local matches one of the patterns in the list of globs given""" + return any(path.fnmatch(pattern) for pattern in patterns) + + +def pytest_pycollect_makemodule(path, parent): + if path.basename == "__init__.py": + return Package(path, parent) + return Module(path, parent) + + +@hookimpl(hookwrapper=True) +def pytest_pycollect_makeitem(collector, name, obj): + outcome = yield + res = outcome.get_result() + if res is not None: + return + # nothing was collected elsewhere, let's do it here + if safe_isclass(obj): + if collector.istestclass(obj, name): + outcome.force_result(Class(name, parent=collector)) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a funtools.wrapped. 
+        # We mustn't if it's been wrapped with mock.patch (python 2 only)
+        if not (isfunction(obj) or isfunction(get_real_func(obj))):
+            filename, lineno = getfslineno(obj)
+            warnings.warn_explicit(
+                message=PytestWarning(
+                    "cannot collect %r because it is not a function." % name
+                ),
+                category=None,
+                filename=str(filename),
+                lineno=lineno + 1,
+            )
+        elif getattr(obj, "__test__", True):
+            if is_generator(obj):
+                res = Function(name, parent=collector)
+                reason = deprecated.YIELD_TESTS.format(name=name)
+                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
+                res.warn(PytestWarning(reason))
+            else:
+                res = list(collector._genfunctions(name, obj))
+            outcome.force_result(res)
+
+
+def pytest_make_parametrize_id(config, val, argname=None):
+    return None
+
+
+class PyobjContext(object):
+    module = pyobj_property("Module")
+    cls = pyobj_property("Class")
+    instance = pyobj_property("Instance")
+
+
+class PyobjMixin(PyobjContext):
+    _ALLOW_MARKERS = True
+
+    def __init__(self, *k, **kw):
+        super(PyobjMixin, self).__init__(*k, **kw)
+
+    @property
+    def obj(self):
+        """Underlying Python object."""
+        obj = getattr(self, "_obj", None)
+        if obj is None:
+            self._obj = obj = self._getobj()
+            # XXX evil hack
+            # used to avoid Instance collector marker duplication
+            if self._ALLOW_MARKERS:
+                self.own_markers.extend(get_unpacked_marks(self.obj))
+        return obj
+
+    @obj.setter
+    def obj(self, value):
+        self._obj = value
+
+    def _getobj(self):
+        """Gets the underlying Python object. May be overwritten by subclasses."""
+        return getattr(self.parent.obj, self.name)
+
+    def getmodpath(self, stopatmodule=True, includemodule=False):
+        """ return python path relative to the containing module. """
+        chain = self.listchain()
+        chain.reverse()
+        parts = []
+        for node in chain:
+            if isinstance(node, Instance):
+                continue
+            name = node.name
+            if isinstance(node, Module):
+                name = os.path.splitext(name)[0]
+                if stopatmodule:
+                    if includemodule:
+                        parts.append(name)
+                    break
+            parts.append(name)
+        parts.reverse()
+        s = ".".join(parts)
+        return s.replace(".[", "[")
+
+    def reportinfo(self):
+        # XXX caching?
+        obj = self.obj
+        compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
+        if isinstance(compat_co_firstlineno, int):
+            # nose compatibility
+            fspath = sys.modules[obj.__module__].__file__
+            if fspath.endswith(".pyc"):
+                fspath = fspath[:-1]
+            lineno = compat_co_firstlineno
+        else:
+            fspath, lineno = getfslineno(obj)
+        modpath = self.getmodpath()
+        assert isinstance(lineno, int)
+        return fspath, lineno, modpath
+
+
+class PyCollector(PyobjMixin, nodes.Collector):
+    def funcnamefilter(self, name):
+        return self._matches_prefix_or_glob_option("python_functions", name)
+
+    def isnosetest(self, obj):
+        """ Look for the __test__ attribute, which is applied by the
+        @nose.tools.istest decorator
+        """
+        # We explicitly check for "is True" here to not mistakenly treat
+        # classes with a custom __getattr__ returning something truthy (like a
+        # function) as test classes.
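+        # (e.g. a class whose __getattr__ returns a truthy mock for any
+        # attribute would otherwise appear to define __test__)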
+ return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name): + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj, name): + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # static methods need to be unwrapped + obj = safe_getattr(obj, "__func__", False) + return ( + safe_getattr(obj, "__call__", False) + and fixtures.getfixturemarker(obj) is None + ) + else: + return False + + def istestclass(self, obj, name): + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name, name): + """ + checks if the given name matches the prefix or glob-pattern defined + in ini configuration. + """ + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self): + if not getattr(self.obj, "__test__", True): + return [] + + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) + dicts = [getattr(self.obj, "__dict__", {})] + for basecls in inspect.getmro(self.obj.__class__): + dicts.append(basecls.__dict__) + seen = {} + values = [] + for dic in dicts: + for name, obj in list(dic.items()): + if name in seen: + continue + seen[name] = True + res = self._makeitem(name, obj) + if res is None: + continue + if not isinstance(res, list): + res = [res] + values.extend(res) + values.sort(key=lambda item: item.reportinfo()[:2]) + return values + + def _makeitem(self, name, obj): + # assert self.ihook.fspath == self.fspath, self + return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) + + def _genfunctions(self, name, funcobj): + module = self.getparent(Module).obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + fm = self.session._fixturemanager + + definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) + fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) + + metafunc = Metafunc( + definition, fixtureinfo, self.config, cls=cls, module=module + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + if methods: + self.ihook.pytest_generate_tests.call_extra( + methods, dict(metafunc=metafunc) + ) + else: + self.ihook.pytest_generate_tests(metafunc=metafunc) + + if not metafunc._calls: + yield Function(name, parent=self, fixtureinfo=fixtureinfo) + else: + # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs + fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) + + # add_funcarg_pseudo_fixture_def may have shadowed some fixtures + # with direct parametrization, so make sure we update what the + # function really needs. 
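+            # (e.g. parametrizing directly over an argument that shares its
+            # name with a fixture replaces that fixture, so the fixture's own
+            # dependencies may no longer be part of the closure)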
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = "%s[%s]" % (name, callspec.id)
+                yield Function(
+                    name=subname,
+                    parent=self,
+                    callspec=callspec,
+                    callobj=funcobj,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+class Module(nodes.File, PyCollector):
+    """ Collector for test classes and functions. """
+
+    def _getobj(self):
+        return self._importtestmodule()
+
+    def collect(self):
+        self._inject_setup_module_fixture()
+        self._inject_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super(Module, self).collect()
+
+    def _inject_setup_module_fixture(self):
+        """Injects a hidden autouse, module scoped fixture into the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
+        if setup_module is None:
+            setup_module = _get_non_fixture_func(self.obj, "setup_module")
+
+        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
+        if teardown_module is None:
+            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
+
+        if setup_module is None and teardown_module is None:
+            return
+
+        @fixtures.fixture(autouse=True, scope="module")
+        def xunit_setup_module_fixture(request):
+            if setup_module is not None:
+                _call_with_optional_argument(setup_module, request.module)
+            yield
+            if teardown_module is not None:
+                _call_with_optional_argument(teardown_module, request.module)
+
+        self.obj.__pytest_setup_module = xunit_setup_module_fixture
+
+    def _inject_setup_function_fixture(self):
+        """Injects a hidden autouse, function scoped fixture into the collected module object
+        that invokes setup_function/teardown_function if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
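+
+        For illustration, a module containing::
+
+            def setup_function(function):
+                ...
+
+        is treated as if it declared an equivalent autouse, function-scoped
+        fixture.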
+ """ + setup_function = _get_non_fixture_func(self.obj, "setup_function") + teardown_function = _get_non_fixture_func(self.obj, "teardown_function") + if setup_function is None and teardown_function is None: + return + + @fixtures.fixture(autouse=True, scope="function") + def xunit_setup_function_fixture(request): + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + if setup_function is not None: + _call_with_optional_argument(setup_function, request.function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, request.function) + + self.obj.__pytest_setup_function = xunit_setup_function_fixture + + def _importtestmodule(self): + # we assume we are only called once per module + importmode = self.config.getoption("--import-mode") + try: + mod = self.fspath.pyimport(ensuresyspath=importmode) + except SyntaxError: + raise self.CollectError( + _pytest._code.ExceptionInfo.from_current().getrepr(style="short") + ) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) + except ImportError: + from _pytest._code.code import ExceptionInfo + + exc_info = ExceptionInfo.from_current() + if self.config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) + except _pytest.runner.Skipped as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." 
+            )
+        self.config.pluginmanager.consider_module(mod)
+        return mod
+
+
+class Package(Module):
+    def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
+        session = parent.session
+        nodes.FSCollector.__init__(
+            self, fspath, parent=parent, config=config, session=session, nodeid=nodeid
+        )
+        self.name = fspath.dirname
+        self.trace = session.trace
+        self._norecursepatterns = session._norecursepatterns
+        self.fspath = fspath
+
+    def setup(self):
+        # not using fixtures to call setup_module here because autouse fixtures
+        # from packages are not called automatically (#4085)
+        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
+        if setup_module is None:
+            setup_module = _get_non_fixture_func(self.obj, "setup_module")
+        if setup_module is not None:
+            _call_with_optional_argument(setup_module, self.obj)
+
+        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
+        if teardown_module is None:
+            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
+        if teardown_module is not None:
+            func = partial(_call_with_optional_argument, teardown_module, self.obj)
+            self.addfinalizer(func)
+
+    def _recurse(self, dirpath):
+        if dirpath.basename == "__pycache__":
+            return False
+        ihook = self.gethookproxy(dirpath.dirpath())
+        if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
+            return
+        for pat in self._norecursepatterns:
+            if dirpath.check(fnmatch=pat):
+                return False
+        ihook = self.gethookproxy(dirpath)
+        ihook.pytest_collect_directory(path=dirpath, parent=self)
+        return True
+
+    def gethookproxy(self, fspath):
+        # check if we have the common case of running
+        # hooks with all conftest.py files
+        pm = self.config.pluginmanager
+        my_conftestmodules = pm._getconftestmodules(fspath)
+        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+        if remove_mods:
+            # one or more conftests are not in use at this fspath
+            proxy = FSHookProxy(fspath, pm, remove_mods)
+        else:
+            # all plugins are active for this fspath
+            proxy = self.config.hook
+        return proxy
+
+    def _collectfile(self, path, handle_dupes=True):
+        assert path.isfile(), "%r is not a file (isdir=%r, exists=%r, islink=%r)" % (
+            path,
+            path.isdir(),
+            path.exists(),
+            path.islink(),
+        )
+        ihook = self.gethookproxy(path)
+        if not self.isinitpath(path):
+            if ihook.pytest_ignore_collect(path=path, config=self.config):
+                return ()
+
+        if handle_dupes:
+            keepduplicates = self.config.getoption("keepduplicates")
+            if not keepduplicates:
+                duplicate_paths = self.config.pluginmanager._duplicatepaths
+                if path in duplicate_paths:
+                    return ()
+                else:
+                    duplicate_paths.add(path)
+
+        if self.fspath == path:  # __init__.py
+            return [self]
+
+        return ihook.pytest_collect_file(path=path, parent=self)
+
+    def isinitpath(self, path):
+        return path in self.session._initialpaths
+
+    def collect(self):
+        this_path = self.fspath.dirpath()
+        init_module = this_path.join("__init__.py")
+        if init_module.check(file=1) and path_matches_patterns(
+            init_module, self.config.getini("python_files")
+        ):
+            yield Module(init_module, self)
+        pkg_prefixes = set()
+        for path in this_path.visit(rec=self._recurse, bf=True, sort=True):
+            # We will visit our own __init__.py file, in which case we skip it.
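+            # (it may already have been yielded as a Module above; collecting
+            # it again here would duplicate its tests)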
+            is_file = path.isfile()
+            if is_file:
+                if path.basename == "__init__.py" and path.dirpath() == this_path:
+                    continue
+
+            parts_ = parts(path.strpath)
+            if any(
+                pkg_prefix in parts_ and pkg_prefix.join("__init__.py") != path
+                for pkg_prefix in pkg_prefixes
+            ):
+                continue
+
+            if is_file:
+                for x in self._collectfile(path):
+                    yield x
+            elif not path.isdir():
+                # Broken symlink or invalid/missing file.
+                continue
+            elif path.join("__init__.py").check(file=1):
+                pkg_prefixes.add(path)
+
+
+def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
+    """
+    Return a callable to perform xunit-style setup or teardown if
+    the function exists in the ``holder`` object.
+    The ``param_obj`` parameter is the parameter which will be passed to the function
+    when the callable is called without arguments, defaults to the ``holder`` object.
+    Return ``None`` if a suitable callable is not found.
+    """
+    # TODO: only needed because of Package!
+    param_obj = param_obj if param_obj is not None else holder
+    result = _get_non_fixture_func(holder, attr_name)
+    if result is not None:
+        arg_count = result.__code__.co_argcount
+        if inspect.ismethod(result):
+            arg_count -= 1
+        if arg_count:
+            return lambda: result(param_obj)
+        else:
+            return result
+
+
+def _call_with_optional_argument(func, arg):
+    """Call the given function with the given argument if func accepts one argument, otherwise
+    calls func without arguments"""
+    arg_count = func.__code__.co_argcount
+    if inspect.ismethod(func):
+        arg_count -= 1
+    if arg_count:
+        func(arg)
+    else:
+        func()
+
+
+def _get_non_fixture_func(obj, name):
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture to
+    avoid calling it twice.
+    """
+    meth = getattr(obj, name, None)
+    if fixtures.getfixturemarker(meth) is None:
+        return meth
+
+
+class Class(PyCollector):
+    """ Collector for test methods. """
+
+    def collect(self):
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
+        if hasinit(self.obj):
+            self.warn(
+                PytestWarning(
+                    "cannot collect test class %r because it has a "
+                    "__init__ constructor" % self.obj.__name__
+                )
+            )
+            return []
+        elif hasnew(self.obj):
+            self.warn(
+                PytestWarning(
+                    "cannot collect test class %r because it has a "
+                    "__new__ constructor" % self.obj.__name__
+                )
+            )
+            return []
+
+        self._inject_setup_class_fixture()
+        self._inject_setup_method_fixture()
+
+        return [Instance(name="()", parent=self)]
+
+    def _inject_setup_class_fixture(self):
+        """Injects a hidden autouse, class scoped fixture into the collected class object
+        that invokes setup_class/teardown_class if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+ """ + setup_class = _get_non_fixture_func(self.obj, "setup_class") + teardown_class = getattr(self.obj, "teardown_class", None) + if setup_class is None and teardown_class is None: + return + + @fixtures.fixture(autouse=True, scope="class") + def xunit_setup_class_fixture(cls): + if setup_class is not None: + func = getimfunc(setup_class) + _call_with_optional_argument(func, self.obj) + yield + if teardown_class is not None: + func = getimfunc(teardown_class) + _call_with_optional_argument(func, self.obj) + + self.obj.__pytest_setup_class = xunit_setup_class_fixture + + def _inject_setup_method_fixture(self): + """Injects a hidden autouse, function scoped fixture into the collected class object + that invokes setup_method/teardown_method if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_method = _get_non_fixture_func(self.obj, "setup_method") + teardown_method = getattr(self.obj, "teardown_method", None) + if setup_method is None and teardown_method is None: + return + + @fixtures.fixture(autouse=True, scope="function") + def xunit_setup_method_fixture(self, request): + method = request.function + if setup_method is not None: + func = getattr(self, "setup_method") + _call_with_optional_argument(func, method) + yield + if teardown_method is not None: + func = getattr(self, "teardown_method") + _call_with_optional_argument(func, method) + + self.obj.__pytest_setup_method = xunit_setup_method_fixture + + +class Instance(PyCollector): + _ALLOW_MARKERS = False # hack, destroy later + # instances share the object with their parents in a way + # that duplicates markers instances if not taken out + # can be removed at node structure reorganization time + + def _getobj(self): + return self.parent.obj() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Instance, self).collect() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + + +class FunctionMixin(PyobjMixin): + """ mixin for the code common to Function and Generator. + """ + + def setup(self): + """ perform setup for this test function. 
""" + if isinstance(self.parent, Instance): + self.parent.newinstance() + self.obj = self._getobj() + + def _prunetraceback(self, excinfo): + if hasattr(self, "_obj") and not self.config.option.fulltrace: + code = _pytest._code.Code(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + + excinfo.traceback = ntraceback.filter() + # issue364: mark all but first and last frames to + # only show a single-line message for each frame + if self.config.option.tbstyle == "auto": + if len(excinfo.traceback) > 2: + for entry in excinfo.traceback[1:-1]: + entry.set_repr_style("short") + + def repr_failure(self, excinfo, outerr=None): + assert outerr is None, "XXX outerr usage is deprecated" + style = self.config.option.tbstyle + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +def hasinit(obj): + init = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + + +def hasnew(obj): + new = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + + +class CallSpec2(object): + def __init__(self, metafunc): + self.metafunc = metafunc + self.funcargs = {} + self._idlist = [] + self.params = {} + self._globalid = NOTSET + self._globalparam = NOTSET + self._arg2scopenum = {} # used for sorting parametrized resources + self.marks = [] + self.indices = {} + + def copy(self): + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs.marks.extend(self.marks) + cs.indices.update(self.indices) + cs._arg2scopenum.update(self._arg2scopenum) + cs._idlist = list(self._idlist) + cs._globalid = self._globalid + cs._globalparam = self._globalparam + return cs + + def _checkargnotcontained(self, arg): + if arg in self.params or arg in self.funcargs: + raise ValueError("duplicate %r" % (arg,)) + + def getparam(self, name): + try: + return self.params[name] + except KeyError: + if self._globalparam is NOTSET: + raise ValueError(name) + return self._globalparam + + @property + def id(self): + return "-".join(map(str, filter(None, self._idlist))) + + def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): + for arg, val in zip(argnames, valset): + self._checkargnotcontained(arg) + valtype_for_arg = valtypes[arg] + getattr(self, valtype_for_arg)[arg] = val + self.indices[arg] = param_index + self._arg2scopenum[arg] = scopenum + self._idlist.append(id) + self.marks.extend(normalize_mark_list(marks)) + + def setall(self, funcargs, id, param): + for x in funcargs: + self._checkargnotcontained(x) + self.funcargs.update(funcargs) + if id is not NOTSET: + self._idlist.append(id) + if param is not NOTSET: + assert self._globalparam is NOTSET + self._globalparam = param + for arg in funcargs: + self._arg2scopenum[arg] = fixtures.scopenum_function + + +class Metafunc(fixtures.FuncargnamesCompatAttr): + """ + Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
+ """ + + def __init__(self, definition, fixtureinfo, config, cls=None, module=None): + assert ( + isinstance(definition, FunctionDefinition) + or type(definition).__name__ == "DefinitionMock" + ) + self.definition = definition + + #: access to the :class:`_pytest.config.Config` object for the test session + self.config = config + + #: the module object where the test function is defined in. + self.module = module + + #: underlying python test function + self.function = definition.obj + + #: set of fixture names required by the test function + self.fixturenames = fixtureinfo.names_closure + + #: class object where the test function is defined in or ``None``. + self.cls = cls + + self._calls = [] + self._ids = set() + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): + """ Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather at test setup time. + + :arg argnames: a comma-separated string denoting one or more argument + names, or a list/tuple of argument strings. + + :arg argvalues: The list of argvalues determines how often a + test is invoked with different argument values. If only one + argname was specified argvalues is a list of values. If N + argnames were specified, argvalues must be a list of N-tuples, + where each tuple-element specifies a value for its respective + argname. + + :arg indirect: The list of argnames or boolean. A list of arguments' + names (subset of argnames). If True the list contains all names from + the argnames. Each argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :arg ids: list of string ids, or a callable. + If strings, each is corresponding to the argvalues so that they are + part of the test id. If None is given as id of specific test, the + automatically generated id for that argument will be used. + If callable, it should take one argument (a single argvalue) and return + a string or return None. If None, the automatically generated id for that + argument will be used. + If no ids are provided they will be generated automatically from + the argvalues. + + :arg scope: if specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. 
+ """ + from _pytest.fixtures import scope2index + from _pytest.mark import ParameterSet + + argnames, parameters = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + function_definition=self.definition, + ) + del argvalues + + if scope is None: + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + arg_values_types = self._resolve_arg_value_types(argnames, indirect) + + ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition) + + scopenum = scope2index( + scope, descr="parametrize() call in {}".format(self.function.__name__) + ) + + # create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): + newcallspec = callspec.copy() + newcallspec.setmulti2( + arg_values_types, + argnames, + param_set.values, + param_id, + param_set.marks, + scopenum, + param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_arg_ids(self, argnames, ids, parameters, item): + """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given + to ``parametrize``. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param ids: the ids parameter of the parametrized call (see docs). + :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``. + :param Item item: the item that generated this parametrized call. + :rtype: List[str] + :return: the list of ids for each argname given + """ + from _pytest._io.saferepr import saferepr + + idfn = None + if callable(ids): + idfn = ids + ids = None + if ids: + func_name = self.function.__name__ + if len(ids) != len(parameters): + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False) + for id_value in ids: + if id_value is not None and not isinstance(id_value, six.string_types): + msg = "In {}: ids must be list of strings, found: {} (type: {!r})" + fail( + msg.format(func_name, saferepr(id_value), type(id_value)), + pytrace=False, + ) + ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item) + return ids + + def _resolve_arg_value_types(self, argnames, indirect): + """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg" + to the function, based on the ``indirect`` parameter of the parametrized() call. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :rtype: Dict[str, str] + A dict mapping each arg name to either: + * "params" if the argname should be the parameter of a fixture of the same name. + * "funcargs" if the argname should be a parameter to the parametrized test function. 
+ """ + valtypes = {} + if indirect is True: + valtypes = dict.fromkeys(argnames, "params") + elif indirect is False: + valtypes = dict.fromkeys(argnames, "funcargs") + elif isinstance(indirect, (tuple, list)): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + fail( + "In {}: indirect fixture '{}' doesn't exist".format( + self.function.__name__, arg + ), + pytrace=False, + ) + valtypes[arg] = "params" + return valtypes + + def _validate_if_using_arg_names(self, argnames, indirect): + """ + Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :raise ValueError: if validation fails. + """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, (tuple, list)): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + "In {}: function uses no {} '{}'".format(func_name, name, arg), + pytrace=False, + ) + + +def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. 
+ """ + from _pytest.fixtures import scopes + + if isinstance(indirect, (list, tuple)): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0].scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + if used_scopes: + # Takes the most narrow scope from used fixtures + for scope in reversed(scopes): + if scope in used_scopes: + return scope + + return "function" + + +def _ascii_escaped_by_config(val, config): + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + return val if escape_option else ascii_escaped(val) + + +def _idval(val, argname, idx, idfn, item, config): + if idfn: + try: + generated_id = idfn(val) + if generated_id is not None: + val = generated_id + except Exception as e: + # See issue https://github.com/pytest-dev/pytest/issues/2169 + msg = "{}: error raised while trying to determine id of parameter '{}' at position {}\n" + msg = msg.format(item.nodeid, argname, idx) + # we only append the exception type and message because on Python 2 reraise does nothing + msg += " {}: {}\n".format(type(e).__name__, e) + six.raise_from(ValueError(msg), e) + elif config: + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname + ) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return _ascii_escaped_by_config(val, config) + elif isinstance(val, (float, int, bool, NoneType)): + return str(val) + elif isinstance(val, REGEX_TYPE): + return ascii_escaped(val.pattern) + elif enum is not None and isinstance(val, enum.Enum): + return str(val) + elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): + return val.__name__ + return str(argname) + str(idx) + + +def _idvalset(idx, parameterset, argnames, idfn, ids, item, config): + if parameterset.id is not None: + return parameterset.id + if ids is None or (idx >= len(ids) or ids[idx] is None): + this_id = [ + _idval(val, argname, idx, idfn, item=item, config=config) + for val, argname in zip(parameterset.values, argnames) + ] + return "-".join(this_id) + else: + return ascii_escaped(ids[idx]) + + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None): + ids = [ + _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item) + for valindex, parameterset in enumerate(parametersets) + ] + if len(set(ids)) != len(ids): + # The ids are not unique + duplicates = [testid for testid in ids if ids.count(testid) > 1] + counters = collections.defaultdict(lambda: 0) + for index, testid in enumerate(ids): + if testid in duplicates: + ids[index] = testid + str(counters[testid]) + counters[testid] += 1 + return ids + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func): + loc = getlocation(func, curdir) + return curdir.bestrelpath(loc) + + def write_fixture(fixture_def): + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + bestrel = 
get_best_relpath(fixture_def.func) + funcargspec = "{} -- {}".format(argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + fixture_doc = fixture_def.func.__doc__ + if fixture_doc: + write_docstring(tw, fixture_doc) + else: + tw.line(" no docstring available", red=True) + + def write_item(item): + try: + info = item._fixtureinfo + except AttributeError: + # doctests items have no _fixtureinfo attribute + return + if not info.name2fixturedefs: + # this test item does not use any fixtures + return + tw.line() + tw.sep("-", "fixtures used by {}".format(item.name)) + tw.sep("-", "({})".format(get_best_relpath(item.function))) + # dict key not used in loop but needed for sorting + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # last item is expected to be the one used by the test item + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config): + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, curdir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(loc), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, bestrel, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", "fixtures defined from %s" % (module,)) + currentmodule = module + if verbose <= 0 and argname[0] == "_": + continue + if verbose > 0: + funcargspec = "%s -- %s" % (argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + loc = getlocation(fixturedef.func, curdir) + doc = fixturedef.func.__doc__ or "" + if doc: + write_docstring(tw, doc) + else: + tw.line(" %s: no docstring available" % (loc,), red=True) + + +def write_docstring(tw, doc, indent=" "): + doc = doc.rstrip() + if "\n" in doc: + firstline, rest = doc.split("\n", 1) + else: + firstline, rest = doc, "" + + if firstline.strip(): + tw.line(indent + firstline.strip()) + + if rest: + for line in dedent(rest).split("\n"): + tw.write(indent + line + "\n") + + +class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): + """ a Function Item is responsible for setting up and executing a + Python test function. 
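+
+    For a parametrized test, each generated item carries the id suffix in
+    its ``name`` (e.g. ``test_eval[case0]``) while ``originalname`` keeps
+    the undecorated ``test_eval`` (illustrative names).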
+ """ + + # disable since functions handle it themselves + _ALLOW_MARKERS = False + + def __init__( + self, + name, + parent, + args=None, + config=None, + callspec=None, + callobj=NOTSET, + keywords=None, + session=None, + fixtureinfo=None, + originalname=None, + ): + super(Function, self).__init__(name, parent, config=config, session=session) + self._args = args + if callobj is not NOTSET: + self.obj = callobj + + self.keywords.update(self.obj.__dict__) + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + # this is total hostile and a mess + # keywords are broken by design by now + # this will be redeemed later + for mark in callspec.marks: + # feel free to cry, this was broken for years before + # and keywords cant fix it per design + self.keywords[mark.name] = mark + self.own_markers.extend(normalize_mark_list(callspec.marks)) + if keywords: + self.keywords.update(keywords) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + + self.keywords.update( + dict.fromkeys( + [ + mark.name + for mark in self.iter_markers() + if mark.name not in self.keywords + ], + True, + ) + ) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=True + ) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + #: original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname + + def _initrequest(self): + self.funcargs = {} + self._request = fixtures.FixtureRequest(self) + + @property + def function(self): + "underlying python 'function' object" + return getimfunc(self.obj) + + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + + @property + def _pyfuncitem(self): + "(compatonly) for code expecting pytest-2.2 style request objects" + return self + + def runtest(self): + """ execute the underlying test function. """ + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self): + super(Function, self).setup() + fixtures.fillfixtures(self) + + +class FunctionDefinition(Function): + """ + internal hack until we get actual definition nodes instead of the + crappy metafunc hack + """ + + def runtest(self): + raise RuntimeError("function definitions are not supposed to be used") + + setup = runtest diff --git a/env_27/lib/python2.7/site-packages/_pytest/python_api.py b/env_27/lib/python2.7/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..1b643d4 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/python_api.py @@ -0,0 +1,736 @@ +from __future__ import absolute_import + +import math +import pprint +import sys +import warnings +from decimal import Decimal +from numbers import Number + +import six +from more_itertools.more import always_iterable +from six.moves import filterfalse +from six.moves import zip + +import _pytest._code +from _pytest import deprecated +from _pytest.compat import isclass +from _pytest.compat import Iterable +from _pytest.compat import Mapping +from _pytest.compat import Sized +from _pytest.compat import STRING_TYPES +from _pytest.outcomes import fail + +BASE_TYPE = (type, STRING_TYPES) + + +def _cmp_raises_type_error(self, other): + """__cmp__ implementation which raises TypeError. 
Used + by Approx base classes to implement only == and != and raise a + TypeError for other comparisons. + + Needed in Python 2 only, Python 3 all it takes is not implementing the + other operators at all. + """ + __tracebackhide__ = True + raise TypeError( + "Comparison operators other than == and != not supported by approx objects" + ) + + +def _non_numeric_type_error(value, at): + at_str = " at {}".format(at) if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +# builtin pytest.approx helper + + +class ApproxBase(object): + """ + Provide shared utilities for making approximate comparisons between numbers + or sequences of numbers. + """ + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok=False): + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self): + raise NotImplementedError + + def __eq__(self, actual): + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def _approx_scalar(self, x): + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """ + Yield all the pairs of numbers to be compared. This is used to + implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self): + """ + Raise a TypeError if the expected value is not a valid type. + """ + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + pass + + +def _recursive_list_map(f, x): + if isinstance(x, list): + return list(_recursive_list_map(f, xi) for xi in x) + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """ + Perform approximate comparisons where the expected value is numpy array. + """ + + def __repr__(self): + list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) + return "approx({!r})".format(list_scalars) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def __eq__(self, actual): + import numpy as np + + # self.expected is supposed to always be an array here + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except: # noqa + raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. 
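+        # For illustration: with expected = np.array([0.3, 0.6]), comparing
+        # against the scalar 0.3 yields the pairs (0.3, 0.3) and (0.3, 0.6);
+        # comparing against an equal-shaped array pairs elements index-wise.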
+ + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """ + Perform approximate comparisons where the expected value is a mapping with + numeric values (the keys can be anything). + """ + + def __repr__(self): + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) + + def __eq__(self, actual): + if set(actual.keys()) != set(self.expected.keys()): + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self): + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + elif not isinstance(value, Number): + raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) + + +class ApproxSequencelike(ApproxBase): + """ + Perform approximate comparisons where the expected value is a sequence of + numbers. + """ + + def __repr__(self): + seq_type = type(self.expected) + if seq_type not in (tuple, list, set): + seq_type = list + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) + + def __eq__(self, actual): + if len(actual) != len(self.expected): + return False + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self): + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + elif not isinstance(x, Number): + raise _non_numeric_type_error( + self.expected, at="index {}".format(index) + ) + + +class ApproxScalar(ApproxBase): + """ + Perform approximate comparisons where the expected value is a single number. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 + DEFAULT_RELATIVE_TOLERANCE = 1e-6 + + def __repr__(self): + """ + Return a string communicating both the expected value and the tolerance + for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode + plus/minus symbol if this is python3 (it's too hard to get right for + python2). + """ + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = "{:.1e}".format(self.tolerance) + except ValueError: + vetted_tolerance = "???" + + if sys.version_info[0] == 2: + return "{} +- {}".format(self.expected, vetted_tolerance) + else: + return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + """ + Return true if the given value is equal to the expected value within + the pre-specified tolerance. + """ + if _is_numpy_array(actual): + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. 
+ return all(self.__eq__(a) for a in actual.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + @property + def tolerance(self): + """ + Return the tolerance for the comparison. This could be either an + absolute tolerance or a relative tolerance, depending on what the user + specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + "absolute tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + "relative tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """ + Perform approximate comparisons where the expected value is a decimal. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok=False): + """ + Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. 
One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + Dictionary *values*:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. 
+ both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. Only available in python>=3.5. `More information...`__ + + __ https://docs.python.org/3/library/math.html#math.isclose + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less than the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. Support for comparing sequences + is provided by ``numpy.allclose``. `More information...`__ + + __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered and the absolute tolerance cannot be changed, so this function + is not appropriate for very large or very small numbers. Also, it's only + available in subclasses of ``unittest.TestCase`` and it's ugly because it + doesn't follow PEP8. `More information...`__ + + __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. ``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. + + .. warning:: + + .. versionchanged:: 3.2 + + In order to avoid inconsistent behavior, ``TypeError`` is + raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. + The example below illustrates the problem:: + + assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) + assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) + + In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` + to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for + the comparison. This is because the call hierarchy of rich comparisons + follows a fixed behavior. `More information...`__ + + __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ + """ + + # Delegate the comparison to a class that knows how to deal with the type + # of the expected value (e.g. int, float, list, dict, numpy.array, etc). + # + # The primary responsibility of these classes is to implement ``__eq__()`` + # and ``__repr__()``. The former is used to actually check if some + # "actual" value is equivalent to the given expected value within the + # allowed tolerance. The latter is used to show the user the expected + # value and tolerance, in the case that a test failed. + # + # The actual logic for making approximate comparisons can be found in + # ApproxScalar, which is used to compare individual numbers. All of the + # other Approx classes eventually delegate to this class. The ApproxBase + # class provides some convenient methods and overloads, but isn't really + # essential.
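+    #
+    # For illustration, the dispatch below maps, e.g.:
+    #   approx(0.3)            -> ApproxScalar
+    #   approx(Decimal("0.3")) -> ApproxDecimal
+    #   approx({"a": 0.3})     -> ApproxMapping
+    #   approx([0.3, 0.6])     -> ApproxSequencelike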
+ + __tracebackhide__ = True + + if isinstance(expected, Decimal): + cls = ApproxDecimal + elif isinstance(expected, Number): + cls = ApproxScalar + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif _is_numpy_array(expected): + cls = ApproxNumpy + elif ( + isinstance(expected, Iterable) + and isinstance(expected, Sized) + and not isinstance(expected, STRING_TYPES) + ): + cls = ApproxSequencelike + else: + raise _non_numeric_type_error(expected, at=None) + + return cls(expected, rel, abs, nan_ok) + + +def _is_numpy_array(obj): + """ + Return true if the given object is a numpy array. Make a special effort to + avoid importing numpy unless it's really necessary. + """ + import sys + + np = sys.modules.get("numpy") + if np is not None: + return isinstance(obj, np.ndarray) + return False + + +# builtin pytest.raises helper + + +def raises(expected_exception, *args, **kwargs): + r""" + Assert that a code block/function call raises ``expected_exception`` + or raise a failure exception otherwise. + + :kwparam match: if specified, asserts that the exception matches a text or regex + + :kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message + if the exception is not raised + + .. currentmodule:: _pytest._code + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type:: + + >>> with raises(ZeroDivisionError): + ... 1/0 + + If the code block does not raise the expected exception (``ZeroDivisionError`` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: + + >>> with raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. deprecated:: 4.1 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message that will be displayed + in case the ``pytest.raises`` check fails. This has been deprecated as it + is considered error prone as users often mean to use ``match`` instead. + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type is ValueError # this will not execute + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type is ValueError + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` + it is possible to parametrize tests such that + some runs raise an exception and others do not. 
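+
+    A minimal sketch of that pattern (hypothetical test names)::
+
+        @pytest.mark.parametrize("value, expectation", [
+            (0, ZeroDivisionError),
+            ("a", TypeError),
+        ])
+        def test_bad_division(value, expectation):
+            with pytest.raises(expectation):
+                1 / value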
+ + See :ref:`parametrizing_conditional_raising` for an example. + + **Legacy form** + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. See the + official Python ``try`` statement documentation for more detailed + information. + + """ + __tracebackhide__ = True + for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): + msg = ( + "exceptions must be old-style classes or" + " derived from BaseException, not %s" + ) + raise TypeError(msg % type(exc)) + + message = "DID NOT RAISE {}".format(expected_exception) + match_expr = None + + if not args: + if "message" in kwargs: + message = kwargs.pop("message") + warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2) + if "match" in kwargs: + match_expr = kwargs.pop("match") + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(kwargs.keys()) + raise TypeError(msg) + return RaisesContext(expected_exception, message, match_expr) + elif isinstance(args[0], str): + warnings.warn(deprecated.RAISES_EXEC, stacklevel=2) + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + # print "raises frame scope: %r" % frame.f_locals + try: + code = _pytest._code.Source(code).compile(_genframe=frame) + six.exec_(code, frame.f_globals, loc) + # XXX didn't mean f_globals == f_locals something special? + # this is destroyed here ... 
+ except expected_exception: + return _pytest._code.ExceptionInfo.from_current() + else: + func = args[0] + try: + func(*args[1:], **kwargs) + except expected_exception: + return _pytest._code.ExceptionInfo.from_current() + fail(message) + + +raises.Exception = fail.Exception + + +class RaisesContext(object): + def __init__(self, expected_exception, message, match_expr): + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo = None + + def __enter__(self): + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__(self, *tp): + __tracebackhide__ = True + if tp[0] is None: + fail(self.message) + self.excinfo.__init__(tp) + suppress_exception = issubclass(self.excinfo.type, self.expected_exception) + if sys.version_info[0] == 2 and suppress_exception: + sys.exc_clear() + if self.match_expr is not None and suppress_exception: + self.excinfo.match(self.match_expr) + return suppress_exception diff --git a/env_27/lib/python2.7/site-packages/_pytest/recwarn.py b/env_27/lib/python2.7/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..3e2ec86 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/recwarn.py @@ -0,0 +1,250 @@ +""" recording warnings during test function execution. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import inspect +import re +import sys +import warnings + +import six + +import _pytest._code +from _pytest.deprecated import PYTEST_WARNS_UNKNOWN_KWARGS +from _pytest.deprecated import WARNS_EXEC +from _pytest.fixtures import yield_fixture +from _pytest.outcomes import fail + + +@yield_fixture +def recwarn(): + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See http://docs.python.org/library/warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder() + with wrec: + warnings.simplefilter("default") + yield wrec + + +def deprecated_call(func=None, *args, **kwargs): + """context manager that can be used to ensure a block of code triggers a + ``DeprecationWarning`` or ``PendingDeprecationWarning``:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> with deprecated_call(): + ... assert api_call_v2() == 200 + + ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings + types above. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +def warns(expected_warning, *args, **kwargs): + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + ``pytest.raises`` can be used:: + + >>> with warns(RuntimeWarning): + ... 
warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the exception matches a text or regex:: + + >>> with warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... + + """ + __tracebackhide__ = True + if not args: + match_expr = kwargs.pop("match", None) + if kwargs: + warnings.warn( + PYTEST_WARNS_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=2 + ) + return WarningsChecker(expected_warning, match_expr=match_expr) + elif isinstance(args[0], str): + warnings.warn(WARNS_EXEC, stacklevel=2) + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + + with WarningsChecker(expected_warning): + code = _pytest._code.Source(code).compile() + six.exec_(code, frame.f_globals, loc) + else: + func = args[0] + with WarningsChecker(expected_warning): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. + """ + + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) + self._entered = False + self._list = [] + + @property + def list(self): + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i): + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self): + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self): + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls=Warning): + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self): + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self): + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter("always") + # python3 keeps track of a "filter version", when the filters are + # updated previously seen warnings can be re-warned. python2 has no + # concept of this so we must reset the warnings registry manually. + # trivial patching of `warnings.warn` seems to be enough somehow? 
+ if six.PY2: + + def warn(message, category=None, stacklevel=1): + # duplicate the stdlib logic due to + # bad handing in the c version of warnings + if isinstance(message, Warning): + category = message.__class__ + # Check category argument + if category is None: + category = UserWarning + assert issubclass(category, Warning) + + # emulate resetting the warn registry + f_globals = sys._getframe(stacklevel).f_globals + if "__warningregistry__" in f_globals: + orig = f_globals["__warningregistry__"] + f_globals["__warningregistry__"] = None + try: + return self._saved_warn(message, category, stacklevel + 1) + finally: + f_globals["__warningregistry__"] = orig + else: + return self._saved_warn(message, category, stacklevel + 1) + + warnings.warn, self._saved_warn = warn, warnings.warn + return self + + def __exit__(self, *exc_info): + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + # see above where `self._saved_warn` is assigned + if six.PY2: + warnings.warn = self._saved_warn + super(WarningsRecorder, self).__exit__(*exc_info) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. + self._entered = False + + +class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, match_expr=None): + super(WarningsChecker, self).__init__() + + msg = "exceptions must be old-style classes or derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not inspect.isclass(exc): + raise TypeError(msg % type(exc)) + elif inspect.isclass(expected_warning): + expected_warning = (expected_warning,) + elif expected_warning is not None: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning + self.match_expr = match_expr + + def __exit__(self, *exc_info): + super(WarningsChecker, self).__exit__(*exc_info) + + __tracebackhide__ = True + + # only check if we're not currently handling an exception + if all(a is None for a in exc_info): + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + "DID NOT WARN. No warnings of type {} was emitted. " + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') was emitted. 
The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/env_27/lib/python2.7/site-packages/_pytest/reports.py b/env_27/lib/python2.7/site-packages/_pytest/reports.py new file mode 100644 index 0000000..d2df4d2 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/reports.py @@ -0,0 +1,427 @@ +from pprint import pprint + +import py +import six + +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest.outcomes import skip +from _pytest.pathlib import Path + + +def getslaveinfoline(node): + try: + return node._slaveinfocache + except AttributeError: + d = node.slaveinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( + d["id"], + d["sysplatform"], + ver, + d["executable"], + ) + return s + + +class BaseReport(object): + when = None + location = None + + def __init__(self, **kw): + self.__dict__.update(kw) + + def toterminal(self, out): + if hasattr(self, "node"): + out.line(getslaveinfoline(self.node)) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr.toterminal(out) + else: + try: + out.line(longrepr) + except UnicodeEncodeError: + out.line("") + + def get_sections(self, prefix): + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self): + """ + Read-only property that returns the full string representation + of ``longrepr``. + + .. versionadded:: 3.0 + """ + tw = py.io.TerminalWriter(stringio=True) + tw.hasmarkup = False + self.toterminal(tw) + exc = tw.stringio.getvalue() + return exc.strip() + + @property + def caplog(self): + """Return captured log lines, if log capturing is enabled + + .. versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self): + """Return captured text from stdout, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self): + """Return captured text from stderr, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self): + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self): + """ + **Experimental** + + Returns True if this report should be counted towards the totals shown at the end of the + test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. 
+ """ + return True + + @property + def head_line(self): + """ + **Experimental** + + Returns the head line shown with longrepr output for this report, more commonly during + traceback representation during failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + + def _to_json(self): + """ + This was originally the serialize_report() function from xdist (ca03269). + + Returns the contents of this report as a dict of builtin entries, suitable for + serialization. + + Experimental method. + """ + + def disassembled_report(rep): + reprtraceback = rep.longrepr.reprtraceback.__dict__.copy() + reprcrash = rep.longrepr.reprcrash.__dict__.copy() + + new_entries = [] + for entry in reprtraceback["reprentries"]: + entry_data = { + "type": type(entry).__name__, + "data": entry.__dict__.copy(), + } + for key, value in entry_data["data"].items(): + if hasattr(value, "__dict__"): + entry_data["data"][key] = value.__dict__.copy() + new_entries.append(entry_data) + + reprtraceback["reprentries"] = new_entries + + return { + "reprcrash": reprcrash, + "reprtraceback": reprtraceback, + "sections": rep.longrepr.sections, + } + + d = self.__dict__.copy() + if hasattr(self.longrepr, "toterminal"): + if hasattr(self.longrepr, "reprtraceback") and hasattr( + self.longrepr, "reprcrash" + ): + d["longrepr"] = disassembled_report(self) + else: + d["longrepr"] = six.text_type(self.longrepr) + else: + d["longrepr"] = self.longrepr + for name in d: + if isinstance(d[name], (py.path.local, Path)): + d[name] = str(d[name]) + elif name == "result": + d[name] = None # for now + return d + + @classmethod + def _from_json(cls, reportdict): + """ + This was originally the serialize_report() function from xdist (ca03269). + + Factory method that returns either a TestReport or CollectReport, depending on the calling + class. It's the callers responsibility to know which class to pass here. + + Experimental method. 
+ """ + if reportdict["longrepr"]: + if ( + "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + + reprtraceback = reportdict["longrepr"]["reprtraceback"] + reprcrash = reportdict["longrepr"]["reprcrash"] + + unserialized_entries = [] + reprentry = None + for entry_data in reprtraceback["reprentries"]: + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + filelocrepr=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, cls, reportdict) + unserialized_entries.append(reprentry) + reprtraceback["reprentries"] = unserialized_entries + + exception_info = ReprExceptionInfo( + reprtraceback=ReprTraceback(**reprtraceback), + reprcrash=ReprFileLocation(**reprcrash), + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return cls(**reportdict) + + +def _report_unserialization_failure(type_name, report_class, reportdict): + url = "https://github.com/pytest-dev/pytest/issues" + stream = py.io.TextIO() + pprint("-" * 100, stream=stream) + pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream) + pprint("report_name: %s" % report_class, stream=stream) + pprint(reportdict, stream=stream) + pprint("Please report this bug at %s" % url, stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +class TestReport(BaseReport): + """ Basic test report object (also used for setup and teardown calls if + they fail). + """ + + __test__ = False + + def __init__( + self, + nodeid, + location, + keywords, + outcome, + longrepr, + when, + sections=(), + duration=0, + user_properties=None, + **extra + ): + #: normalized collection node id + self.nodeid = nodeid + + #: a (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location = location + + #: a name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties of the test + self.user_properties = list(user_properties or []) + + #: list of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. 
+ self.sections = list(sections) + + #: time it took to run just the test + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self): + return "" % ( + self.nodeid, + self.when, + self.outcome, + ) + + @classmethod + def from_item_and_call(cls, item, call): + """ + Factory method to create and fill a TestReport with standard item and call info. + """ + when = call.when + duration = call.stop - call.start + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome = "passed" + longrepr = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif excinfo.errisinstance(skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py( + excinfo, style=item.config.option.tbstyle + ) + for rwhen, key, content in item._report_sections: + sections.append(("Captured %s %s" % (key, rwhen), content)) + return cls( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + user_properties=item.user_properties, + ) + + +class CollectReport(BaseReport): + when = "collect" + + def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): + self.nodeid = nodeid + self.outcome = outcome + self.longrepr = longrepr + self.result = result or [] + self.sections = list(sections) + self.__dict__.update(extra) + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def __repr__(self): + return "" % ( + self.nodeid, + len(self.result), + self.outcome, + ) + + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): + self.longrepr = msg + + def toterminal(self, out): + out.line(self.longrepr, red=True) + + +def pytest_report_to_serializable(report): + if isinstance(report, (TestReport, CollectReport)): + data = report._to_json() + data["_report_type"] = report.__class__.__name__ + return data + + +def pytest_report_from_serializable(data): + if "_report_type" in data: + if data["_report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["_report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["_report_type"] + ) diff --git a/env_27/lib/python2.7/site-packages/_pytest/resultlog.py b/env_27/lib/python2.7/site-packages/_pytest/resultlog.py new file mode 100644 index 0000000..cd3fde8 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/resultlog.py @@ -0,0 +1,101 @@ +""" log machine-parseable test session result information in a plain +text file. 
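+
+Each logged entry is a status letter followed by the test path, with any
+failure representation indented by one space beneath it, roughly::
+
+    . test_sample.py::test_ok
+    F test_sample.py::test_fail
+     def test_fail():
+     >   assert 0
+     E   assert 0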
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import py + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "resultlog plugin options") + group.addoption( + "--resultlog", + "--result-log", + action="store", + metavar="path", + default=None, + help="DEPRECATED path for machine-readable result log.", + ) + + +def pytest_configure(config): + resultlog = config.option.resultlog + # prevent opening resultlog on slave nodes (xdist) + if resultlog and not hasattr(config, "slaveinput"): + dirname = os.path.dirname(os.path.abspath(resultlog)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(resultlog, "w", 1) # line buffered + config._resultlog = ResultLog(config, logfile) + config.pluginmanager.register(config._resultlog) + + from _pytest.deprecated import RESULT_LOG + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2) + + +def pytest_unconfigure(config): + resultlog = getattr(config, "_resultlog", None) + if resultlog: + resultlog.logfile.close() + del config._resultlog + config.pluginmanager.unregister(resultlog) + + +class ResultLog(object): + def __init__(self, config, logfile): + self.config = config + self.logfile = logfile # preferably line buffered + + def write_log_entry(self, testpath, lettercode, longrepr): + print("%s %s" % (lettercode, testpath), file=self.logfile) + for line in longrepr.splitlines(): + print(" %s" % line, file=self.logfile) + + def log_outcome(self, report, lettercode, longrepr): + testpath = getattr(report, "nodeid", None) + if testpath is None: + testpath = report.fspath + self.write_log_entry(testpath, lettercode, longrepr) + + def pytest_runtest_logreport(self, report): + if report.when != "call" and report.passed: + return + res = self.config.hook.pytest_report_teststatus( + report=report, config=self.config + ) + code = res[1] + if code == "x": + longrepr = str(report.longrepr) + elif code == "X": + longrepr = "" + elif report.passed: + longrepr = "" + elif report.failed: + longrepr = str(report.longrepr) + elif report.skipped: + longrepr = str(report.longrepr[2]) + self.log_outcome(report, code, longrepr) + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + code = "F" + longrepr = str(report.longrepr) + else: + assert report.skipped + code = "S" + longrepr = "%s:%d: %s" % report.longrepr + self.log_outcome(report, code, longrepr) + + def pytest_internalerror(self, excrepr): + reprcrash = getattr(excrepr, "reprcrash", None) + path = getattr(reprcrash, "path", None) + if path is None: + path = "cwd:%s" % py.path.local() + self.write_log_entry(path, "!", str(excrepr)) diff --git a/env_27/lib/python2.7/site-packages/_pytest/runner.py b/env_27/lib/python2.7/site-packages/_pytest/runner.py new file mode 100644 index 0000000..1f09f42 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/runner.py @@ -0,0 +1,375 @@ +""" basic collect and runtest protocol implementations """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import bdb +import os +import sys +from time import time + +import attr +import six + +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest._code.code import ExceptionInfo +from _pytest.outcomes import Exit +from _pytest.outcomes import skip +from _pytest.outcomes 
import Skipped +from _pytest.outcomes import TEST_OUTCOME + +# +# pytest plugin hooks + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ), + + +def pytest_terminal_summary(terminalreporter): + durations = terminalreporter.config.option.durations + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration) + dlist.reverse() + if not durations: + tr.write_sep("=", "slowest test durations") + else: + tr.write_sep("=", "slowest %s test durations" % durations) + dlist = dlist[:durations] + + for rep in dlist: + if verbose < 2 and rep.duration < 0.005: + tr.write_line("") + tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)") + break + tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, rep.nodeid)) + + +def pytest_sessionstart(session): + session._setupstate = SetupState() + + +def pytest_sessionfinish(session): + session._setupstate.teardown_all() + + +def pytest_runtest_protocol(item, nextitem): + item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol(item, log=True, nextitem=None): + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: + item._initrequest() + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # after all teardown hooks have been called + # want funcargs and request info to go away + if hasrequest: + item._request = False + item.funcargs = None + return reports + + +def show_test_item(item): + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item._nodeid) + used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + + +def pytest_runtest_setup(item): + _update_current_test_var(item, "setup") + item.session._setupstate.prepare(item) + + +def pytest_runtest_call(item): + _update_current_test_var(item, "call") + sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) + try: + item.runtest() + except Exception: + # Store trace info to allow postmortem debugging + type, value, tb = sys.exc_info() + tb = tb.tb_next # Skip *this* frame + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + del type, value, tb # Get rid of these in this frame + raise + + +def pytest_runtest_teardown(item, nextitem): + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var(item, when): + """ + Update PYTEST_CURRENT_TEST to reflect the current item and stage. 
+
+    If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
+    """
+    var_name = "PYTEST_CURRENT_TEST"
+    if when:
+        value = "{} ({})".format(item.nodeid, when)
+        # don't allow null bytes on environment variables (see #2644, #2957)
+        value = value.replace("\x00", "(null)")
+        os.environ[var_name] = value
+    else:
+        os.environ.pop(var_name)
+
+
+def pytest_report_teststatus(report):
+    if report.when in ("setup", "teardown"):
+        if report.failed:
+            #   category, shortletter, verbose-word
+            return "error", "E", "ERROR"
+        elif report.skipped:
+            return "skipped", "s", "SKIPPED"
+        else:
+            return "", "", ""
+
+
+#
+# Implementation
+
+
+def call_and_report(item, when, log=True, **kwds):
+    call = call_runtest_hook(item, when, **kwds)
+    hook = item.ihook
+    report = hook.pytest_runtest_makereport(item=item, call=call)
+    if log:
+        hook.pytest_runtest_logreport(report=report)
+    if check_interactive_exception(call, report):
+        hook.pytest_exception_interact(node=item, call=call, report=report)
+    return report
+
+
+def check_interactive_exception(call, report):
+    return call.excinfo and not (
+        hasattr(report, "wasxfail")
+        or call.excinfo.errisinstance(skip.Exception)
+        or call.excinfo.errisinstance(bdb.BdbQuit)
+    )
+
+
+def call_runtest_hook(item, when, **kwds):
+    hookname = "pytest_runtest_" + when
+    ihook = getattr(item.ihook, hookname)
+    reraise = (Exit,)
+    if not item.config.getoption("usepdb", False):
+        reraise += (KeyboardInterrupt,)
+    return CallInfo.from_call(
+        lambda: ihook(item=item, **kwds), when=when, reraise=reraise
+    )
+
+
+@attr.s(repr=False)
+class CallInfo(object):
+    """ Result/Exception info a function invocation. """
+
+    _result = attr.ib()
+    # Optional[ExceptionInfo]
+    excinfo = attr.ib()
+    start = attr.ib()
+    stop = attr.ib()
+    when = attr.ib()
+
+    @property
+    def result(self):
+        if self.excinfo is not None:
+            raise AttributeError("{!r} has no valid result".format(self))
+        return self._result
+
+    @classmethod
+    def from_call(cls, func, when, reraise=None):
+        #: context of invocation: one of "setup", "call",
+        #: "teardown", "memocollect"
+        start = time()
+        excinfo = None
+        try:
+            result = func()
+        except:  # noqa
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and excinfo.errisinstance(reraise):
+                raise
+            result = None
+        stop = time()
+        return cls(start=start, stop=stop, when=when, result=result, excinfo=excinfo)
+
+    def __repr__(self):
+        if self.excinfo is not None:
+            status = "exception"
+            value = self.excinfo.value
+        else:
+            # TODO: investigate unification
+            value = repr(self._result)
+            status = "result"
+        return "<CallInfo when={when!r} {status}: {value}>".format(
+            when=self.when, value=value, status=status
+        )
+
+
+def pytest_runtest_makereport(item, call):
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector):
+    call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
+    longrepr = None
+    if not call.excinfo:
+        outcome = "passed"
+    else:
+        from _pytest import nose
+
+        skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
+        if call.excinfo.errisinstance(skip_exceptions):
+            outcome = "skipped"
+            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    rep = CollectReport(
+        collector.nodeid, outcome, longrepr, getattr(call, "result", None)
+    )
+    rep.call = call  # see collect_one_node
+    return
rep + + +class SetupState(object): + """ shared state for setting up/tearing down test items or collectors. """ + + def __init__(self): + self.stack = [] + self._finalizers = {} + + def addfinalizer(self, finalizer, colitem): + """ attach a finalizer to the given colitem. + if colitem is None, this will add a finalizer that + is called at the end of teardown_all(). + """ + assert colitem and not isinstance(colitem, tuple) + assert callable(finalizer) + # assert colitem in self.stack # some unit tests don't setup stack :/ + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem): + finalizers = self._finalizers.pop(colitem, None) + exc = None + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def _teardown_with_finalization(self, colitem): + self._callfinalizers(colitem) + if hasattr(colitem, "teardown"): + colitem.teardown() + for colitem in self._finalizers: + assert ( + colitem is None or colitem in self.stack or isinstance(colitem, tuple) + ) + + def teardown_all(self): + while self.stack: + self._pop_and_teardown() + for key in list(self._finalizers): + self._teardown_with_finalization(key) + assert not self._finalizers + + def teardown_exact(self, item, nextitem): + needed_collectors = nextitem and nextitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors): + exc = None + while self.stack: + if self.stack == needed_collectors[: len(self.stack)]: + break + try: + self._pop_and_teardown() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. 
+ if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def prepare(self, colitem): + """ setup objects along the collector chain to the test-method + and teardown previously setup objects.""" + needed_collectors = colitem.listchain() + self._teardown_towards(needed_collectors) + + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, "_prepare_exc"): + six.reraise(*col._prepare_exc) + for col in needed_collectors[len(self.stack) :]: + self.stack.append(col) + try: + col.setup() + except TEST_OUTCOME: + col._prepare_exc = sys.exc_info() + raise + + +def collect_one_node(collector): + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/env_27/lib/python2.7/site-packages/_pytest/setuponly.py b/env_27/lib/python2.7/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..4bd4ad6 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/setuponly.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef, request): + yield + config = request.config + if config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + fixturedef.cached_param = fixturedef.ids(request.param) + else: + fixturedef.cached_param = fixturedef.ids[request.param_index] + else: + fixturedef.cached_param = request.param + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef): + if hasattr(fixturedef, "cached_result"): + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action(fixturedef, msg): + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + out, err = capman.read_global_capture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(" " * 2 * fixturedef.scopenum) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write("[{}]".format(fixturedef.cached_param)) + + if capman: + capman.resume_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setuponly: + config.option.setupshow = True diff --git a/env_27/lib/python2.7/site-packages/_pytest/setupplan.py b/env_27/lib/python2.7/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..351e0be --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/setupplan.py @@ -0,0 +1,31 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + # Will return a dummy fixture if the setuponly option is provided. + if request.config.option.setupplan: + fixturedef.cached_result = (None, None, None) + return fixturedef.cached_result + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True diff --git a/env_27/lib/python2.7/site-packages/_pytest/skipping.py b/env_27/lib/python2.7/site-packages/_pytest/skipping.py new file mode 100644 index 0000000..22acafb --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/skipping.py @@ -0,0 +1,310 @@ +""" support for skip/xfail functions and markers. 
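[Editorial aside: the two plugins above, setuponly.py and setupplan.py, are easiest to see from their command-line flags. A sketch driving them through the public pytest.main API; the tests/ path is a placeholder.]

import pytest

# Print the fixture setup plan without executing any test bodies
# (--setup-plan implies --setup-only and --setup-show, per pytest_cmdline_main above).
pytest.main(["--setup-plan", "tests/"])
# Execute fixture setup/teardown only, skipping the test calls themselves.
pytest.main(["--setup-only", "tests/"])
# Run tests normally while printing a SETUP/TEARDOWN line for each fixture.
pytest.main(["--setup-show", "tests/"])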
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from _pytest.config import hookimpl +from _pytest.mark.evaluate import MarkEvaluator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="run tests even if they are marked xfail", + ) + + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config): + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "https://docs.pytest.org/en/latest/skipping.html", + ) + config.addinivalue_line( + "markers", + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html",
+    )
+
+
+@hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+    # Check if skip or skipif are specified as pytest marks
+    item._skipped_by_mark = False
+    eval_skipif = MarkEvaluator(item, "skipif")
+    if eval_skipif.istrue():
+        item._skipped_by_mark = True
+        skip(eval_skipif.getexplanation())
+
+    for skip_info in item.iter_markers(name="skip"):
+        item._skipped_by_mark = True
+        if "reason" in skip_info.kwargs:
+            skip(skip_info.kwargs["reason"])
+        elif skip_info.args:
+            skip(skip_info.args[0])
+        else:
+            skip("unconditional skip")
+
+    item._evalxfail = MarkEvaluator(item, "xfail")
+    check_xfail_no_run(item)
+
+
+@hookimpl(hookwrapper=True)
+def pytest_pyfunc_call(pyfuncitem):
+    check_xfail_no_run(pyfuncitem)
+    outcome = yield
+    passed = outcome.excinfo is None
+    if passed:
+        check_strict_xfail(pyfuncitem)
+
+
+def check_xfail_no_run(item):
+    """check xfail(run=False)"""
+    if not item.config.option.runxfail:
+        evalxfail = item._evalxfail
+        if evalxfail.istrue():
+            if not evalxfail.get("run", True):
+                xfail("[NOTRUN] " + evalxfail.getexplanation())
+
+
+def check_strict_xfail(pyfuncitem):
+    """check xfail(strict=True) for the given PASSING test"""
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue():
+        strict_default = pyfuncitem.config.getini("xfail_strict")
+        is_strict_xfail = evalxfail.get("strict", strict_default)
+        if is_strict_xfail:
+            del pyfuncitem._evalxfail
+            explanation = evalxfail.getexplanation()
+            fail("[XPASS(strict)] " + explanation, pytrace=False)
+
+
+@hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    outcome = yield
+    rep = outcome.get_result()
+    evalxfail = getattr(item, "_evalxfail", None)
+    # unittest special case, see setting of _unexpectedsuccess
+    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
+        from _pytest.compat import _is_unittest_unexpected_success_a_failure
+
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+        rep.wasxfail = rep.longrepr
+    elif item.config.option.runxfail:
+        pass  # don't interfere
+    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
+        rep.wasxfail = "reason: " + call.excinfo.value.msg
+        rep.outcome = "skipped"
+    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
+        if call.excinfo:
+            if evalxfail.invalidraise(call.excinfo.value):
+                rep.outcome = "failed"
+            else:
+                rep.outcome = "skipped"
+                rep.wasxfail = evalxfail.getexplanation()
+        elif call.when == "call":
+            strict_default = item.config.getini("xfail_strict")
+            is_strict_xfail = evalxfail.get("strict", strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
+    elif (
+        getattr(item, "_skipped_by_mark", False)
+        and rep.skipped
+        and type(rep.longrepr) is tuple
+    ):
+        # skipped by mark.skipif; change the location of the failure
+        # to point to the item definition, otherwise it will display
+        # the location of where the skip exception was raised within pytest
+        filename, line, reason = rep.longrepr
+        filename, line = item.location[:2]
+        rep.longrepr = filename, line, reason
+
+
+# called by terminalreporter progress reporting
+
+
+def pytest_report_teststatus(report):
+    if
hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + + +# called by the terminalreporter instance/plugin + + +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + if not tr.reportchars: + return + + lines = [] + for char in tr.reportchars: + action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) + action(terminalreporter, lines) + + if lines: + tr._tw.sep("=", "short test summary info") + for line in lines: + tr._tw.line(line) + + +def show_simple(terminalreporter, lines, stat): + failed = terminalreporter.stats.get(stat) + if failed: + config = terminalreporter.config + for rep in failed: + verbose_word = _get_report_str(config, rep) + pos = _get_pos(config, rep) + lines.append("%s %s" % (verbose_word, pos)) + + +def show_xfailed(terminalreporter, lines): + xfailed = terminalreporter.stats.get("xfailed") + if xfailed: + config = terminalreporter.config + for rep in xfailed: + verbose_word = _get_report_str(config, rep) + pos = _get_pos(config, rep) + lines.append("%s %s" % (verbose_word, pos)) + reason = rep.wasxfail + if reason: + lines.append(" " + str(reason)) + + +def show_xpassed(terminalreporter, lines): + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + config = terminalreporter.config + for rep in xpassed: + verbose_word = _get_report_str(config, rep) + pos = _get_pos(config, rep) + reason = rep.wasxfail + lines.append("%s %s %s" % (verbose_word, pos, reason)) + + +def folded_skips(skipped): + d = {} + for event in skipped: + key = event.longrepr + assert len(key) == 3, (event, key) + keywords = getattr(event, "keywords", {}) + # folding reports with global pytestmark variable + # this is workaround, because for now we cannot identify the scope of a skip marker + # TODO: revisit after marks scope would be fixed + if ( + event.when == "setup" + and "skip" in keywords + and "pytestmark" not in keywords + ): + key = (key[0], None, key[2]) + d.setdefault(key, []).append(event) + values = [] + for key, events in d.items(): + values.append((len(events),) + key) + return values + + +def show_skipped(terminalreporter, lines): + tr = terminalreporter + skipped = tr.stats.get("skipped", []) + if skipped: + fskips = folded_skips(skipped) + if fskips: + verbose_word = _get_report_str(terminalreporter.config, report=skipped[0]) + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" + % (verbose_word, num, fspath, lineno + 1, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) + + +def shower(stat): + def show_(terminalreporter, lines): + return show_simple(terminalreporter, lines, stat) + + return show_ + + +def _get_report_str(config, report): + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=report, config=config + ) + return verbose + + +def _get_pos(config, rep): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + return nodeid + + +REPORTCHAR_ACTIONS = { + "x": show_xfailed, + "X": show_xpassed, + "f": shower("failed"), + "F": shower("failed"), + "s": show_skipped, + "S": show_skipped, + "p": shower("passed"), + "E": shower("error"), +} diff --git a/env_27/lib/python2.7/site-packages/_pytest/stepwise.py b/env_27/lib/python2.7/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000..68e53a3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/stepwise.py @@ -0,0 
+1,114 @@ +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + dest="stepwise", + help="exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--stepwise-skip", + action="store_true", + dest="stepwise_skip", + help="ignore the first failing test but stop on the next failing test", + ) + + +@pytest.hookimpl +def pytest_configure(config): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +class StepwisePlugin: + def __init__(self, config): + self.config = config + self.active = config.getvalue("stepwise") + self.session = None + + if self.active: + self.lastfailed = config.cache.get("cache/stepwise", None) + self.skip = config.getvalue("stepwise_skip") + + def pytest_sessionstart(self, session): + self.session = session + + def pytest_collection_modifyitems(self, session, config, items): + if not self.active: + return + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + already_passed = [] + found = False + + # Make a list of all tests that have been run before the last failing one. + for item in items: + if item.nodeid == self.lastfailed: + found = True + break + else: + already_passed.append(item) + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if not found: + self.report_status = "previously failed test not found, not skipping." + already_passed = [] + else: + self.report_status = "skipping {} already passed items.".format( + len(already_passed) + ) + + for item in already_passed: + items.remove(item) + + config.hook.pytest_deselected(items=already_passed) + + def pytest_collectreport(self, report): + if self.active and report.failed: + self.session.shouldstop = ( + "Error when collecting test, stopping test execution." + ) + + def pytest_runtest_logreport(self, report): + # Skip this hook if plugin is not active or the test is xfailed. + if not self.active or "xfail" in report.keywords: + return + + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self): + if self.active and self.config.getoption("verbose") >= 0: + return "stepwise: %s" % self.report_status + + def pytest_sessionfinish(self, session): + if self.active: + self.config.cache.set("cache/stepwise", self.lastfailed) + else: + # Clear the list of failing tests if the plugin is not active. + self.config.cache.set("cache/stepwise", []) diff --git a/env_27/lib/python2.7/site-packages/_pytest/terminal.py b/env_27/lib/python2.7/site-packages/_pytest/terminal.py new file mode 100644 index 0000000..06604fd --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/terminal.py @@ -0,0 +1,924 @@ +""" terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. 
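[Editorial aside on stepwise.py above: the last failing nodeid is persisted under the cache/stepwise key, so an identical second invocation deselects what already passed. A runnable sketch with a placeholder path.]

import pytest

# First run: stops at the first failure and stores its nodeid under the
# "cache/stepwise" cache key (see pytest_sessionfinish above).
pytest.main(["--stepwise", "tests/"])
# Second run: already-passed items are deselected and execution resumes at
# the recorded failure; add --stepwise-skip to tolerate one failure.
pytest.main(["--stepwise", "tests/"])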
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import collections +import platform +import sys +import time + +import attr +import pluggy +import py +import six +from more_itertools import collapse + +import pytest +from _pytest import nodes +from _pytest.main import EXIT_INTERRUPTED +from _pytest.main import EXIT_NOTESTSCOLLECTED +from _pytest.main import EXIT_OK +from _pytest.main import EXIT_TESTSFAILED +from _pytest.main import EXIT_USAGEERROR + +REPORT_COLLECTING_RESOLUTION = 0.5 + + +class MoreQuietAction(argparse.Action): + """ + a modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time + + used to unify verbosity handling + """ + + def __init__(self, option_strings, dest, default=None, required=False, help=None): + super(MoreQuietAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ), + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ), + group._addoption( + "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default="", + metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. " + "Warnings are displayed at all times except when " + "--disable-warnings is set", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. 
" + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) + + parser.addini( + "console_output_style", + help="console output: classic or with additional progress information (classic|progress).", + default="progress", + ) + + +def pytest_configure(config): + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config): + reportopts = "" + reportchars = config.option.reportchars + if not config.option.disable_warnings and "w" not in reportchars: + reportchars += "w" + elif config.option.disable_warnings and "w" in reportchars: + reportchars = reportchars.replace("w", "") + if reportchars: + for char in reportchars: + if char not in reportopts and char != "a": + reportopts += char + elif char == "a": + reportopts = "sxXwEf" + return reportopts + + +def pytest_report_teststatus(report): + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + elif report.failed: + letter = "F" + if report.when != "call": + letter = "f" + return report.outcome, letter, report.outcome.upper() + + +@attr.s +class WarningReport(object): + """ + Simple structure to hold warnings information captured by ``pytest_warning_captured``. + + :ivar str message: user friendly message about the warning + :ivar str|None nodeid: node id that generated the warning (see ``get_location``). + :ivar tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + """ + + message = attr.ib() + nodeid = attr.ib(default=None) + fslocation = attr.ib(default=None) + count_towards_summary = True + + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. 
+ """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + if not relpath: + relpath = str(filename) + return "%s:%s" % (relpath, linenum) + else: + return str(self.fslocation) + return None + + +class TerminalReporter(object): + def __init__(self, config, file=None): + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session = None + self._showfspath = None + + self.stats = {} + self.startdir = py.path.local() + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + # self.writer will be deprecated in pytest-3.4 + self.writer = self._tw + self._screen_width = self._tw.fullwidth + self.currentfspath = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write = None + + def _determine_show_progress_info(self): + """Return True if we should display progress information based on the current config""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption("capture", "no") == "no": + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + return self.config.getini("console_output_style") in ("progress", "count") + + @property + def verbosity(self): + return self.config.option.verbose + + @property + def showheader(self): + return self.verbosity >= 0 + + @property + def showfspath(self): + if self._showfspath is None: + return self.verbosity >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value): + self._showfspath = value + + @property + def showlongtestinfo(self): + return self.verbosity > 0 + + def hasopt(self, char): + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid, res, **markup): + fspath = self.config.rootdir.join(nodeid.split("::")[0]) + # NOTE: explicitly check for None to work around py bug, and for less + # overhead in general (https://github.com/pytest-dev/py/pull/207). + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + self.currentfspath = fspath + fspath = self.startdir.bestrelpath(fspath) + self._tw.line() + self._tw.write(fspath + " ") + self._tw.write(res, **markup) + + def write_ensure_prefix(self, prefix, extra="", **kwargs): + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self): + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content, **markup): + self._tw.write(content, **markup) + + def write_line(self, line, **markup): + if not isinstance(line, six.text_type): + line = six.text_type(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line, **markup): + """ + Rewinds the terminal cursor to the beginning and writes the given line. 
+ + :kwarg erase: if True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep(self, sep, title=None, **markup): + self.ensure_newline() + self._tw.sep(sep, title, **markup) + + def section(self, title, sep="=", **kw): + self._tw.sep(sep, title, **kw) + + def line(self, msg, **kw): + self._tw.line(msg, **kw) + + def pytest_internalerror(self, excrepr): + for line in six.text_type(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return 1 + + def pytest_warning_captured(self, warning_message, item): + # from _pytest.nodes import get_fslocation_from_item + from _pytest.warnings import warning_record_to_str + + warnings = self.stats.setdefault("warnings", []) + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + nodeid = item.nodeid if item is not None else "" + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + warnings.append(warning_report) + + def pytest_plugin_registered(self, plugin): + if self.config.option.traceconfig: + msg = "PLUGIN registered: %s" % (plugin,) + # XXX this event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line + self.write_line(msg) + + def pytest_deselected(self, items): + self.stats.setdefault("deselected", []).extend(items) + + def pytest_runtest_logstart(self, nodeid, location): + # ensure that the path is printed before the + # 1st test of a module starts running + if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + elif self.showfspath: + fsid = nodeid.split("::")[0] + self.write_fspath_result(fsid, "") + + def pytest_runtest_logreport(self, report): + self._tests_ran = True + rep = report + res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + category, letter, word = res + if isinstance(word, tuple): + word, markup = word + else: + markup = None + self.stats.setdefault(category, []).append(rep) + if not letter and not word: + # probably passed setup/teardown + return + running_xdist = hasattr(rep, "node") + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + if self.verbosity <= 0: + if not running_xdist and self.showfspath: + self.write_fspath_result(rep.nodeid, letter, **markup) + else: + self._tw.write(letter, **markup) + else: + self._progress_nodeids_reported.add(rep.nodeid) + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + 
self.currentfspath = -2 + + def pytest_runtest_logfinish(self, nodeid): + if self.config.getini("console_output_style") == "count": + num_tests = self._session.testscollected + progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) + else: + progress_length = len(" [100%]") + + if self.verbosity <= 0 and self._show_progress_info: + self._progress_nodeids_reported.add(nodeid) + last_item = ( + len(self._progress_nodeids_reported) == self._session.testscollected + ) + if last_item: + self._write_progress_information_filling_space() + else: + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", cyan=True) + + def _get_progress_information_message(self): + collected = self._session.testscollected + if self.config.getini("console_output_style") == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = "{{:{}d}}".format(len(str(collected))) + format_string = " [{}/{{}}]".format(counter_format) + return format_string.format(len(progress), collected) + return " [ {} / {} ]".format(collected, collected) + else: + if collected: + progress = len(self._progress_nodeids_reported) * 100 // collected + return " [{:3d}%]".format(progress) + return " [100%]" + + def _write_progress_information_filling_space(self): + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), cyan=True) + + @property + def _width_of_current_line(self): + """Return the width of current line, using the superior implementation of py-1.6 when available""" + try: + return self._tw.width_of_current_line + except AttributeError: + # py < 1.6.0 + return self._tw.chars_on_current_line + + def pytest_collection(self): + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", bold=True) + self._collect_report_last_write = time.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", bold=True) + + def pytest_collectreport(self, report): + if report.failed: + self.stats.setdefault("error", []).append(report) + elif report.skipped: + self.stats.setdefault("skipped", []).append(report) + items = [x for x in report.result if isinstance(x, pytest.Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final=False): + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. 
+ t = time.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - errors - skipped - deselected + if final: + line = "collected " + else: + line = "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d errors" % errors + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected > 0: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @pytest.hookimpl(trylast=True) + def pytest_collection_modifyitems(self): + self.report_collect(True) + + @pytest.hookimpl(trylast=True) + def pytest_sessionstart(self, session): + self._session = session + self._sessionstarttime = time.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + msg = "platform %s -- Python %s" % (sys.platform, verinfo) + if hasattr(sys, "pypy_version_info"): + verinfo = ".".join(map(str, sys.pypy_version_info[:3])) + msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) + msg += ", pytest-%s, py-%s, pluggy-%s" % ( + pytest.__version__, + py.__version__, + pluggy.__version__, + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, startdir=self.startdir + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks(self, lines): + lines.reverse() + for line in collapse(lines): + self.write_line(line) + + def pytest_report_header(self, config): + line = "rootdir: %s" % config.rootdir + + if config.inifile: + line += ", inifile: " + config.rootdir.bestrelpath(config.inifile) + + testpaths = config.getini("testpaths") + if testpaths and config.args == testpaths: + rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths] + line += ", testpaths: {}".format(", ".join(rel_paths)) + result = [line] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return result + + def pytest_collection_finish(self, session): + if self.config.getoption("collectonly"): + self._printcollecteditems(session.items) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if self.stats.get("failed"): + self._tw.sep("!", "collection failures") + for rep in self.stats.get("failed"): + rep.toterminal(self._tw) + + def _printcollecteditems(self, items): + # to print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = {} + for item in items: + name = item.nodeid.split("::", 1)[0] + counts[name] = counts.get(name, 0) + 1 + for name, 
count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + if col.name == "()": # Skip Instances. + continue + indent = (len(stack) - 1) * " " + self._tw.line("%s%s" % (indent, col)) + if self.config.option.verbose >= 1: + if hasattr(col, "_obj") and col._obj.__doc__: + for line in col._obj.__doc__.strip().splitlines(): + self._tw.line("%s%s" % (indent + " ", line.strip())) + + @pytest.hookimpl(hookwrapper=True) + def pytest_sessionfinish(self, exitstatus): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, + ) + if exitstatus in summary_exit_codes: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if exitstatus == EXIT_INTERRUPTED: + self._report_keyboardinterrupt() + del self._keyboardinterrupt_memo + self.summary_stats() + + @pytest.hookimpl(hookwrapper=True) + def pytest_terminal_summary(self): + self.summary_errors() + self.summary_failures() + self.summary_warnings() + yield + self.summary_passes() + # Display any extra warnings from teardown here (if any). + self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo): + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self): + if hasattr(self, "_keyboardinterrupt_memo"): + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self): + excrepr = self._keyboardinterrupt_memo + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --fulltrace)", + yellow=True, + ) + + def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path + + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + self.startdir.bestrelpath(fspath) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + if rep.head_line: + return rep.head_line + else: + return "test session" # XXX? 
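[Editorial aside on the _printcollecteditems verbosity branches above, driven through pytest.main; the tests/ path is a placeholder.]

import pytest

# Default: print the indented collector tree (Instances, shown as "()", are skipped).
pytest.main(["--collect-only", "tests/"])
# -q: one nodeid per line; -qq: only "<file>: <count>" summaries.
pytest.main(["-q", "--collect-only", "tests/"])
pytest.main(["-qq", "--collect-only", "tests/"])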
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # summaries for sessionfinish + # + def getreports(self, name): + values = [] + for x in self.stats.get(name, []): + if not hasattr(x, "_pdbshown"): + values.append(x) + return values + + def summary_warnings(self): + if self.hasopt("w"): + all_warnings = self.stats.get("warnings") + if not all_warnings: + return + + final = hasattr(self, "_already_displayed_warnings") + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message = collections.OrderedDict() + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, warning_reports in reports_grouped_by_message.items(): + has_any_location = False + for w in warning_reports: + location = w.get_location(self.config) + if location: + self._tw.line(str(location)) + has_any_location = True + if has_any_location: + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html") + + def summary_passes(self): + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + self._outrep_summary(rep) + + def print_teardown_sections(self, rep): + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + for rep in reports: + if self.config.option.tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + for report in self.getreports(""): + if report.nodeid == rep.nodeid and report.when == "teardown": + self.print_teardown_sections(report) + + def summary_errors(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + elif rep.when == "setup": + msg = "ERROR at setup of " + msg + elif rep.when == "teardown": + msg = "ERROR at teardown of " + msg + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep): + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and 
showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self): + session_duration = time.time() - self._sessionstarttime + (line, color) = build_summary_stats_line(self.stats) + msg = "%s in %.2f seconds" % (line, session_duration) + markup = {color: True, "bold": True} + + if self.verbosity >= 0: + self.write_sep("=", msg, **markup) + if self.verbosity == -1: + self.write_line(msg, **markup) + + +def build_summary_stats_line(stats): + known_types = ( + "failed passed skipped deselected xfailed xpassed warnings error".split() + ) + unknown_type_seen = False + for found_type in stats: + if found_type not in known_types: + if found_type: # setup/teardown reports have an empty key, ignore them + known_types.append(found_type) + unknown_type_seen = True + parts = [] + for key in known_types: + reports = stats.get(key, None) + if reports: + count = sum( + 1 for rep in reports if getattr(rep, "count_towards_summary", True) + ) + parts.append("%d %s" % (count, key)) + + if parts: + line = ", ".join(parts) + else: + line = "no tests ran" + + if "failed" in stats or "error" in stats: + color = "red" + elif "warnings" in stats or unknown_type_seen: + color = "yellow" + elif "passed" in stats: + color = "green" + else: + color = "yellow" + + return line, color + + +def _plugin_nameversions(plugininfo): + values = [] + for plugin, dist in plugininfo: + # gets us name and version! + name = "{dist.project_name}-{dist.version}".format(dist=dist) + # questionable convenience, but it keeps things short + if name.startswith("pytest-"): + name = name[7:] + # we decided to print python package names + # they can have more than one plugin + if name not in values: + values.append(name) + return values diff --git a/env_27/lib/python2.7/site-packages/_pytest/tmpdir.py b/env_27/lib/python2.7/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..4d109cc --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/tmpdir.py @@ -0,0 +1,196 @@ +""" support for providing temporary directories to test functions. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import tempfile +import warnings + +import attr +import py +import six + +import pytest +from .pathlib import ensure_reset_dir +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import Path +from _pytest.monkeypatch import MonkeyPatch + + +@attr.s +class TempPathFactory(object): + """Factory for temporary directories under the common base temp directory. 
+
+    The base directory can be configured using the ``--basetemp`` option."""
+
+    _given_basetemp = attr.ib(
+        # using os.path.abspath() to get absolute path instead of resolve() as it
+        # does not work the same in all platforms (see #4427)
+        # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012)
+        converter=attr.converters.optional(
+            lambda p: Path(os.path.abspath(six.text_type(p)))
+        )
+    )
+    _trace = attr.ib()
+    _basetemp = attr.ib(default=None)
+
+    @classmethod
+    def from_config(cls, config):
+        """
+        :param config: a pytest configuration
+        """
+        return cls(
+            given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir")
+        )
+
+    def mktemp(self, basename, numbered=True):
+        """makes a temporary directory managed by the factory"""
+        if not numbered:
+            p = self.getbasetemp().joinpath(basename)
+            p.mkdir()
+        else:
+            p = make_numbered_dir(root=self.getbasetemp(), prefix=basename)
+        self._trace("mktemp", p)
+        return p
+
+    def getbasetemp(self):
+        """ return base temporary directory. """
+        if self._basetemp is None:
+            if self._given_basetemp is not None:
+                basetemp = self._given_basetemp
+                ensure_reset_dir(basetemp)
+                basetemp = basetemp.resolve()
+            else:
+                from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
+                temproot = Path(from_env or tempfile.gettempdir()).resolve()
+                user = get_user() or "unknown"
+                # use a sub-directory in the temproot to speed-up
+                # make_numbered_dir() call
+                rootdir = temproot.joinpath("pytest-of-{}".format(user))
+                rootdir.mkdir(exist_ok=True)
+                basetemp = make_numbered_dir_with_cleanup(
+                    prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
+                )
+            assert basetemp is not None
+            self._basetemp = t = basetemp
+            self._trace("new basetemp", t)
+            return t
+        else:
+            return self._basetemp
+
+
+@attr.s
+class TempdirFactory(object):
+    """
+    backward compatibility wrapper that implements
+    :class:``py.path.local`` for :class:``TempPathFactory``
+    """
+
+    _tmppath_factory = attr.ib()
+
+    def ensuretemp(self, string, dir=1):
+        """ (deprecated) return temporary directory path with
+        the given string as the trailing part. It is usually
+        better to use the 'tmpdir' function argument which
+        provides an empty unique-per-test-invocation directory
+        and is guaranteed to be empty.
+        """
+        # py.log._apiwarn(">1.1", "use tmpdir function argument")
+        from .deprecated import PYTEST_ENSURETEMP
+
+        warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)
+        return self.getbasetemp().ensure(string, dir=dir)
+
+    def mktemp(self, basename, numbered=True):
+        """Create a subdirectory of the base temporary directory and return it.
+        If ``numbered``, ensure the directory is unique by adding a number
+        prefix greater than any existing one.
+        """
+        return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
+
+    def getbasetemp(self):
+        """backward compat wrapper for ``_tmppath_factory.getbasetemp``"""
+        return py.path.local(self._tmppath_factory.getbasetemp().resolve())
+
+
+def get_user():
+    """Return the current user name, or None if getuser() does not work
+    in the current environment (see #1010).
+    """
+    import getpass
+
+    try:
+        return getpass.getuser()
+    except (ImportError, KeyError):
+        return None
+
+
+def pytest_configure(config):
+    """Create a TempdirFactory and attach it to the config object.
+
+    This is to comply with existing plugins which expect the handler to be
+    available at pytest_configure time, but ideally should be moved entirely
+    to the tmpdir_factory session fixture.
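[Editorial aside: a sketch of the factory machinery above as seen from a test. tmp_path_factory, the session fixture defined just below, is the public entry point; the file name and content are made up.]

def test_writes_summary(tmp_path_factory):
    # mktemp() returns a fresh numbered directory below the session basetemp.
    outdir = tmp_path_factory.mktemp("reports")
    target = outdir / "summary.txt"
    target.write_text(u"3 passed")  # unicode literal keeps Python 2.7 happy
    assert target.read_text() == u"3 passed"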
+ """ + mp = MonkeyPatch() + tmppath_handler = TempPathFactory.from_config(config) + t = TempdirFactory(tmppath_handler) + config._cleanup.append(mp.undo) + mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) + mp.setattr(config, "_tmpdirhandler", t, raising=False) + mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) + + +@pytest.fixture(scope="session") +def tmpdir_factory(request): + """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. + """ + return request.config._tmpdirhandler + + +@pytest.fixture(scope="session") +def tmp_path_factory(request): + """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. + """ + return request.config._tmp_path_factory + + +def _mk_tmp(request, factory): + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@pytest.fixture +def tmpdir(tmp_path): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a `py.path.local`_ + path object. + + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html + """ + return py.path.local(tmp_path) + + +@pytest.fixture +def tmp_path(request, tmp_path_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a :class:`pathlib.Path` + object. + + .. note:: + + in python < 3.6 this is a pathlib2.Path + """ + + return _mk_tmp(request, tmp_path_factory) diff --git a/env_27/lib/python2.7/site-packages/_pytest/unittest.py b/env_27/lib/python2.7/site-packages/_pytest/unittest.py new file mode 100644 index 0000000..58d7984 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/unittest.py @@ -0,0 +1,288 @@ +""" discovery and running of std-library "unittest" style tests. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import traceback + +import _pytest._code +import pytest +from _pytest.compat import getimfunc +from _pytest.config import hookimpl +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function + + +def pytest_pycollect_makeitem(collector, name, obj): + # has unittest been imported and is obj a subclass of its TestCase? 
+ try: + if not issubclass(obj, sys.modules["unittest"].TestCase): + return + except Exception: + return + # yes, so let's collect it + return UnitTestCase(name, parent=collector) + + +class UnitTestCase(Class): + # marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs + nofuncargs = True + + def collect(self): + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = getattr(cls, "__unittest_skip__", False) + if not skipped: + self._inject_setup_teardown_fixtures(cls) + self._inject_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + yield TestCaseFunction(name, parent=self, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction("runTest", parent=self) + + def _inject_setup_teardown_fixtures(self, cls): + """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding + teardown functions (#517)""" + class_fixture = _make_xunit_fixture( + cls, "setUpClass", "tearDownClass", scope="class", pass_self=False + ) + if class_fixture: + cls.__pytest_class_setup = class_fixture + + method_fixture = _make_xunit_fixture( + cls, "setup_method", "teardown_method", scope="function", pass_self=True + ) + if method_fixture: + cls.__pytest_method_setup = method_fixture + + +def _make_xunit_fixture(obj, setup_name, teardown_name, scope, pass_self): + setup = getattr(obj, setup_name, None) + teardown = getattr(obj, teardown_name, None) + if setup is None and teardown is None: + return None + + @pytest.fixture(scope=scope, autouse=True) + def fixture(self, request): + if getattr(self, "__unittest_skip__", None): + reason = self.__unittest_skip_why__ + pytest.skip(reason) + if setup is not None: + if pass_self: + setup(self, request.function) + else: + setup() + yield + if teardown is not None: + if pass_self: + teardown(self, request.function) + else: + teardown() + + return fixture + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo = None + _testcase = None + + def setup(self): + self._testcase = self.parent.obj(self.name) + self._fix_unittest_skip_decorator() + if hasattr(self, "_request"): + self._request._fillfixtures() + + def _fix_unittest_skip_decorator(self): + """ + The @unittest.skip decorator calls functools.wraps(self._testcase) + The call to functools.wraps() fails unless self._testcase + has a __name__ attribute. This is usually automatically supplied + if the test is a function or method, but we need to add manually + here. 
+ + See issue #1169 + """ + if sys.version_info[0] == 2: + setattr(self._testcase, "__name__", self.name) + + def teardown(self): + self._testcase = None + + def startTest(self, testcase): + pass + + def _addexcinfo(self, rawexcinfo): + # unwrap potential exception info (see twisted trial support below) + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo(rawexcinfo) + # invoke the attributes to trigger storing the traceback + # trial causes some issue there + excinfo.value + excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except: # noqa + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addFailure(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase, reason): + try: + skip(reason) + except skip.Exception: + self._skipped_by_mark = True + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure(self, testcase, rawexcinfo, reason=""): + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess(self, testcase, reason=""): + self._unexpectedsuccess = reason + + def addSuccess(self, testcase): + pass + + def stopTest(self, testcase): + pass + + def _handle_skip(self): + # implements the skipping machinery (see #2137) + # analog to pythons Lib/unittest/case.py:run + testMethod = getattr(self._testcase, self._testcase._testMethodName) + if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr( + testMethod, "__unittest_skip__", False + ): + # If the class or method was skipped. 
+ skip_why = getattr( + self._testcase.__class__, "__unittest_skip_why__", "" + ) or getattr(testMethod, "__unittest_skip_why__", "") + try: # PY3, unittest2 on PY2 + self._testcase._addSkip(self, self._testcase, skip_why) + except TypeError: # PY2 + if sys.version_info[0] != 2: + raise + self._testcase._addSkip(self, skip_why) + return True + return False + + def runtest(self): + if self.config.pluginmanager.get_plugin("pdbinvoke") is None: + self._testcase(result=self) + else: + # disables tearDown and cleanups for post mortem debugging (see #1890) + if self._handle_skip(): + return + self._testcase.debug() + + def _prunetraceback(self, excinfo): + Function._prunetraceback(self, excinfo) + traceback = excinfo.traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest") + ) + if traceback: + excinfo.traceback = traceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item, call): + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + +# twisted trial support + + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done=[]): + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) diff --git a/env_27/lib/python2.7/site-packages/_pytest/warning_types.py b/env_27/lib/python2.7/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..55e1f03 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/warning_types.py @@ -0,0 +1,60 @@ +import attr + + +class PytestWarning(UserWarning): + """ + Bases: :class:`UserWarning`. + + Base class for all warnings emitted by pytest. + """ + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. + + Warning class for features that will be removed in a future version. + """ + + +class RemovedInPytest4Warning(PytestDeprecationWarning): + """ + Bases: :class:`pytest.PytestDeprecationWarning`. + + Warning class for features scheduled to be removed in pytest 4.0. + """ + + +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. + + Warning category used to denote experiments in pytest. 
Use sparingly as the API might change or even be + removed completely in future version + """ + + @classmethod + def simple(cls, apiname): + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@attr.s +class UnformattedWarning(object): + """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. + + Using this class avoids to keep all the warning types and messages in this module, avoiding misuse. + """ + + category = attr.ib() + template = attr.ib() + + def format(self, **kwargs): + """Returns an instance of the warning category, formatted with given kwargs""" + return self.category(self.template.format(**kwargs)) + + +PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example") diff --git a/env_27/lib/python2.7/site-packages/_pytest/warnings.py b/env_27/lib/python2.7/site-packages/_pytest/warnings.py new file mode 100644 index 0000000..3360aea --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest/warnings.py @@ -0,0 +1,179 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import warnings +from contextlib import contextmanager + +import pytest +from _pytest import compat + +SHOW_PYTEST_WARNINGS_ARG = "-Walways::pytest.RemovedInPytest4Warning" + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. + """ + parts = arg.split(":") + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append("") + action, message, category, module, lineno = [s.strip() for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.", + ) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item(config, ihook, when, item): + """ + Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_captured`` hook. 
+ """ + cmdline_filters = config.getoption("pythonwarnings") or [] + inifilters = config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + + if not sys.warnoptions: + # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908) + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + warnings.filterwarnings("error", category=pytest.RemovedInPytest4Warning) + + # filters should have this precedence: mark, cmdline options, ini + # filters should be applied in the inverse order of precedence + for arg in inifilters: + _setoption(warnings, arg) + + for arg in cmdline_filters: + warnings._setoption(arg) + + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + _setoption(warnings, arg) + + yield + + for warning_message in log: + ihook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=warning_message, when=when, item=item) + ) + + +def warning_record_to_str(warning_message): + """Convert a warnings.WarningMessage to a string. + + This takes lot of unicode shenaningans into account for Python 2. + When Python 2 support is dropped this function can be greatly simplified. + """ + warn_msg = warning_message.message + unicode_warning = False + if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + new_args = [] + for m in warn_msg.args: + new_args.append( + compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m + ) + unicode_warning = list(warn_msg.args) != new_args + warn_msg.args = new_args + + msg = warnings.formatwarning( + warn_msg, + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if unicode_warning: + warnings.warn( + "Warning is using unicode non convertible to ascii, " + "converting to a safe representation:\n {!r}".format(compat.safe_str(msg)), + UnicodeWarning, + ) + return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session): + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary(terminalreporter): + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +def _issue_warning_captured(warning, hook, stacklevel): + """ + This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: + at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured + hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. + + :param warning: the warning instance. 
+ :param hook: the hook caller + :param stacklevel: stacklevel forwarded to warnings.warn + """ + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + warnings.warn(warning, stacklevel=stacklevel) + hook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=records[0], when="config", item=None) + ) diff --git a/env_27/lib/python2.7/site-packages/_pytest_mock_version.py b/env_27/lib/python2.7/site-packages/_pytest_mock_version.py new file mode 100644 index 0000000..dcd4dc1 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/_pytest_mock_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '1.10.3' diff --git a/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/METADATA b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/METADATA new file mode 100644 index 0000000..a07d53d --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/METADATA @@ -0,0 +1,144 @@ +Metadata-Version: 2.1 +Name: atomicwrites +Version: 1.3.0 +Summary: Atomic file writes. +Home-page: https://github.com/untitaker/python-atomicwrites +Author: Markus Unterwaditzer +Author-email: markus@unterwaditzer.net +License: MIT +Platform: UNKNOWN +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: Implementation :: CPython +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +=================== +python-atomicwrites +=================== + +.. image:: https://travis-ci.org/untitaker/python-atomicwrites.svg?branch=master + :target: https://travis-ci.org/untitaker/python-atomicwrites + +.. image:: https://ci.appveyor.com/api/projects/status/vadc4le3c27to59x/branch/master?svg=true + :target: https://ci.appveyor.com/project/untitaker/python-atomicwrites/branch/master + +Atomic file writes. + +.. code-block:: python + + from atomicwrites import atomic_write + + with atomic_write('foo.txt', overwrite=True) as f: + f.write('Hello world.') + # "foo.txt" doesn't exist yet. + + # Now it does. + + +Features that distinguish it from other similar libraries (see `Alternatives and Credit`_): + +- Race-free assertion that the target file doesn't yet exist. This can be + controlled with the ``overwrite`` parameter. + +- Windows support, although not well-tested. The MSDN resources are not very + explicit about which operations are atomic. I'm basing my assumptions off `a + comment + `_ + by `Doug Crook + `_, who appears + to be a Microsoft employee: + + FAQ: Is MoveFileEx atomic + Frequently asked question: Is MoveFileEx atomic if the existing and new + files are both on the same drive? + + The simple answer is "usually, but in some cases it will silently fall-back + to a non-atomic method, so don't count on it". 
+ + The implementation of MoveFileEx looks something like this: [...] + + The problem is if the rename fails, you might end up with a CopyFile, which + is definitely not atomic. + + If you really need atomic-or-nothing, you can try calling + NtSetInformationFile, which is unsupported but is much more likely to be + atomic. + +- Simple high-level API that wraps a very flexible class-based API. + +- Consistent error handling across platforms. + + +How it works +============ + +It uses a temporary file in the same directory as the given path. This ensures +that the temporary file resides on the same filesystem. + +The temporary file will then be atomically moved to the target location: On +POSIX, it will use ``rename`` if files should be overwritten, otherwise a +combination of ``link`` and ``unlink``. On Windows, it uses MoveFileEx_ through +stdlib's ``ctypes`` with the appropriate flags. + +Note that with ``link`` and ``unlink``, there's a timewindow where the file +might be available under two entries in the filesystem: The name of the +temporary file, and the name of the target file. + +Also note that the permissions of the target file may change this way. In some +situations a ``chmod`` can be issued without any concurrency problems, but +since that is not always the case, this library doesn't do it by itself. + +.. _MoveFileEx: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx + +fsync +----- + +On POSIX, ``fsync`` is invoked on the temporary file after it is written (to +flush file content and metadata), and on the parent directory after the file is +moved (to flush filename). + +``fsync`` does not take care of disks' internal buffers, but there don't seem +to be any standard POSIX APIs for that. On OS X, ``fcntl`` is used with +``F_FULLFSYNC`` instead of ``fsync`` for that reason. + +On Windows, `_commit `_ +is used, but there are no guarantees about disk internal buffers. + +Alternatives and Credit +======================= + +Atomicwrites is directly inspired by the following libraries (and shares a +minimal amount of code): + +- The Trac project's `utility functions + `_, + also used in `Werkzeug `_ and + `mitsuhiko/python-atomicfile + `_. The idea to use + ``ctypes`` instead of ``PyWin32`` originated there. + +- `abarnert/fatomic `_. Windows support + (based on ``PyWin32``) was originally taken from there. + +Other alternatives to atomicwrites include: + +- `sashka/atomicfile `_. Originally I + considered using that, but at the time it was lacking a lot of features I + needed (Windows support, overwrite-parameter, overriding behavior through + subclassing). + +- The `Boltons library collection `_ + features a class for atomic file writes, which seems to have a very similar + ``overwrite`` parameter. It is lacking Windows support though. + +License +======= + +Licensed under the MIT, see ``LICENSE``. 
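The README above notes that with ``overwrite`` left at its default of ``False``, an error is raised only after the temporary file has been written, so the existing target is never clobbered. A minimal sketch of both modes (``report.txt`` is an illustrative path; ``FileExistsError`` is a subclass of ``OSError``, so the ``except`` clause below also covers Python 2)::

    from atomicwrites import atomic_write

    # overwrite=True: the temporary file is renamed over the existing
    # target atomically.
    with atomic_write("report.txt", overwrite=True) as f:
        f.write("all checks passed\n")

    # overwrite=False (the default): the commit fails if the target
    # exists, but only at the end -- the original file is untouched.
    try:
        with atomic_write("report.txt") as f:
            f.write("this never replaces report.txt\n")
    except OSError:
        print("report.txt already exists, left unchanged")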
+ + diff --git a/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/RECORD b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/RECORD new file mode 100644 index 0000000..1ec09bd --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/RECORD @@ -0,0 +1,7 @@ +atomicwrites-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +atomicwrites-1.3.0.dist-info/METADATA,sha256=T9BgDww1IuYBG4de40vAgvpSbNBrRtNjTBNcukt1HZU,5535 +atomicwrites-1.3.0.dist-info/RECORD,, +atomicwrites-1.3.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +atomicwrites-1.3.0.dist-info/top_level.txt,sha256=ks64zKVUkrl2ZrrP046CsytXlSGf8gLG-IcoXpNyeoc,13 +atomicwrites/__init__.py,sha256=00DapdQb04-k79KZjzzfwnI1Q8nfsiF5TPeVcqbGVw0,6562 +atomicwrites/__init__.pyc,, diff --git a/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/WHEEL b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000..1316c41 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..5fa5a87 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +atomicwrites diff --git a/env_27/lib/python2.7/site-packages/atomicwrites/__init__.py b/env_27/lib/python2.7/site-packages/atomicwrites/__init__.py new file mode 100644 index 0000000..cfa5c29 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/atomicwrites/__init__.py @@ -0,0 +1,216 @@ +import contextlib +import io +import os +import sys +import tempfile + +try: + import fcntl +except ImportError: + fcntl = None + +__version__ = '1.3.0' + + +PY2 = sys.version_info[0] == 2 + +text_type = unicode if PY2 else str # noqa + + +def _path_to_unicode(x): + if not isinstance(x, text_type): + return x.decode(sys.getfilesystemencoding()) + return x + + +DEFAULT_MODE = "wb" if PY2 else "w" + + +_proper_fsync = os.fsync + + +if sys.platform != 'win32': + if hasattr(fcntl, 'F_FULLFSYNC'): + def _proper_fsync(fd): + # https://lists.apple.com/archives/darwin-dev/2005/Feb/msg00072.html + # https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/fsync.2.html + # https://github.com/untitaker/python-atomicwrites/issues/6 + fcntl.fcntl(fd, fcntl.F_FULLFSYNC) + + def _sync_directory(directory): + # Ensure that filenames are written to disk + fd = os.open(directory, 0) + try: + _proper_fsync(fd) + finally: + os.close(fd) + + def _replace_atomic(src, dst): + os.rename(src, dst) + _sync_directory(os.path.normpath(os.path.dirname(dst))) + + def _move_atomic(src, dst): + os.link(src, dst) + os.unlink(src) + + src_dir = os.path.normpath(os.path.dirname(src)) + dst_dir = os.path.normpath(os.path.dirname(dst)) + _sync_directory(dst_dir) + if src_dir != dst_dir: + _sync_directory(src_dir) +else: + from ctypes import windll, WinError + + _MOVEFILE_REPLACE_EXISTING = 0x1 + _MOVEFILE_WRITE_THROUGH = 0x8 + _windows_default_flags = _MOVEFILE_WRITE_THROUGH + + def _handle_errors(rv): + if not rv: + raise WinError() + + def _replace_atomic(src, dst): + _handle_errors(windll.kernel32.MoveFileExW( + _path_to_unicode(src), 
_path_to_unicode(dst), + _windows_default_flags | _MOVEFILE_REPLACE_EXISTING + )) + + def _move_atomic(src, dst): + _handle_errors(windll.kernel32.MoveFileExW( + _path_to_unicode(src), _path_to_unicode(dst), + _windows_default_flags + )) + + +def replace_atomic(src, dst): + ''' + Move ``src`` to ``dst``. If ``dst`` exists, it will be silently + overwritten. + + Both paths must reside on the same filesystem for the operation to be + atomic. + ''' + return _replace_atomic(src, dst) + + +def move_atomic(src, dst): + ''' + Move ``src`` to ``dst``. There might a timewindow where both filesystem + entries exist. If ``dst`` already exists, :py:exc:`FileExistsError` will be + raised. + + Both paths must reside on the same filesystem for the operation to be + atomic. + ''' + return _move_atomic(src, dst) + + +class AtomicWriter(object): + ''' + A helper class for performing atomic writes. Usage:: + + with AtomicWriter(path).open() as f: + f.write(...) + + :param path: The destination filepath. May or may not exist. + :param mode: The filemode for the temporary file. This defaults to `wb` in + Python 2 and `w` in Python 3. + :param overwrite: If set to false, an error is raised if ``path`` exists. + Errors are only raised after the file has been written to. Either way, + the operation is atomic. + + If you need further control over the exact behavior, you are encouraged to + subclass. + ''' + + def __init__(self, path, mode=DEFAULT_MODE, overwrite=False, + **open_kwargs): + if 'a' in mode: + raise ValueError( + 'Appending to an existing file is not supported, because that ' + 'would involve an expensive `copy`-operation to a temporary ' + 'file. Open the file in normal `w`-mode and copy explicitly ' + 'if that\'s what you\'re after.' + ) + if 'x' in mode: + raise ValueError('Use the `overwrite`-parameter instead.') + if 'w' not in mode: + raise ValueError('AtomicWriters can only be written to.') + + self._path = path + self._mode = mode + self._overwrite = overwrite + self._open_kwargs = open_kwargs + + def open(self): + ''' + Open the temporary file. + ''' + return self._open(self.get_fileobject) + + @contextlib.contextmanager + def _open(self, get_fileobject): + f = None # make sure f exists even if get_fileobject() fails + try: + success = False + with get_fileobject(**self._open_kwargs) as f: + yield f + self.sync(f) + self.commit(f) + success = True + finally: + if not success: + try: + self.rollback(f) + except Exception: + pass + + def get_fileobject(self, suffix="", prefix=tempfile.template, dir=None, + **kwargs): + '''Return the temporary file to use.''' + if dir is None: + dir = os.path.normpath(os.path.dirname(self._path)) + descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, + dir=dir) + # io.open() will take either the descriptor or the name, but we need + # the name later for commit()/replace_atomic() and couldn't find a way + # to get the filename from the descriptor. 
+ os.close(descriptor) + kwargs['mode'] = self._mode + kwargs['file'] = name + return io.open(**kwargs) + + def sync(self, f): + '''responsible for clearing as many file caches as possible before + commit''' + f.flush() + _proper_fsync(f.fileno()) + + def commit(self, f): + '''Move the temporary file to the target location.''' + if self._overwrite: + replace_atomic(f.name, self._path) + else: + move_atomic(f.name, self._path) + + def rollback(self, f): + '''Clean up all temporary resources.''' + os.unlink(f.name) + + +def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs): + ''' + Simple atomic writes. This wraps :py:class:`AtomicWriter`:: + + with atomic_write(path) as f: + f.write(...) + + :param path: The target path to write to. + :param writer_cls: The writer class to use. This parameter is useful if you + subclassed :py:class:`AtomicWriter` to change some behavior and want to + use that new subclass. + + Additional keyword arguments are passed to the writer class. See + :py:class:`AtomicWriter`. + ''' + return writer_cls(path, **cls_kwargs).open() diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/INSTALLER b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/METADATA b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/METADATA new file mode 100644 index 0000000..4cd941e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/METADATA @@ -0,0 +1,475 @@ +Metadata-Version: 2.1 +Name: bandit +Version: 1.5.1 +Summary: Security oriented static analyser for python code. +Home-page: https://bandit.readthedocs.io/en/latest/ +Author: PyCQA +Author-email: code-quality@python.org +License: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Topic :: Security +Requires-Dist: GitPython (>=1.0.1) +Requires-Dist: PyYAML (>=3.12) +Requires-Dist: six (>=1.10.0) +Requires-Dist: stevedore (>=1.20.0) + +.. image:: https://github.com/PyCQA/bandit/blob/master/logo/logotype-sm.png + :alt: Bandit + +====== + +.. image:: https://travis-ci.org/PyCQA/bandit.svg?branch=master + :target: https://travis-ci.org/PyCQA/bandit/ + :alt: Build Status + +.. image:: https://img.shields.io/pypi/v/bandit.svg + :target: https://pypi.org/project/bandit/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/bandit.svg + :target: https://pypi.org/project/bandit/ + :alt: Python Versions + +.. image:: https://img.shields.io/pypi/format/bandit.svg + :target: https://pypi.org/project/bandit/ + :alt: Format + +.. 
image:: https://img.shields.io/badge/license-Apache%202-blue.svg + :target: https://github.com/PyCQA/bandit/blob/master/LICENSE + :alt: License + +A security linter from PyCQA + +* Free software: Apache license +* Documentation: https://bandit.readthedocs.io/en/latest/ +* Source: https://github.com/PyCQA/bandit +* Bugs: https://github.com/PyCQA/bandit/issues + +Overview +-------- +Bandit is a tool designed to find common security issues in Python code. To do +this Bandit processes each file, builds an AST from it, and runs appropriate +plugins against the AST nodes. Once Bandit has finished scanning all the files +it generates a report. + +Bandit was originally developed within the OpenStack Security Project and +later rehomed to PyCQA. + +Installation +------------ +Bandit is distributed on PyPI. The best way to install it is with pip: + + +Create a virtual environment (optional):: + + virtualenv bandit-env + +Install Bandit:: + + pip install bandit + # Or if you're working with a Python 3 project + pip3 install bandit + +Run Bandit:: + + bandit -r path/to/your/code + + +Bandit can also be installed from source. To do so, download the source tarball +from PyPI, then install it:: + + python setup.py install + + +Usage +----- +Example usage across a code tree:: + + bandit -r ~/your_repos/project + +Example usage across the ``examples/`` directory, showing three lines of +context and only reporting on the high-severity issues:: + + bandit examples/*.py -n 3 -lll + +Bandit can be run with profiles. To run Bandit against the examples directory +using only the plugins listed in the ``ShellInjection`` profile:: + + bandit examples/*.py -p ShellInjection + +Bandit also supports passing lines of code to scan using standard input. To +run Bandit with standard input:: + + cat examples/imports.py | bandit - + +Usage:: + + $ bandit -h + usage: bandit [-h] [-r] [-a {file,vuln}] [-n CONTEXT_LINES] [-c CONFIG_FILE] + [-p PROFILE] [-t TESTS] [-s SKIPS] [-l] [-i] + [-f {csv,custom,html,json,screen,txt,xml,yaml}] + [--msg-template MSG_TEMPLATE] [-o [OUTPUT_FILE]] [-v] [-d] + [--ignore-nosec] [-x EXCLUDED_PATHS] [-b BASELINE] + [--ini INI_PATH] [--version] + [targets [targets ...]] + + Bandit - a Python source code security analyzer + + positional arguments: + targets source file(s) or directory(s) to be tested + + optional arguments: + -h, --help show this help message and exit + -r, --recursive find and process files in subdirectories + -a {file,vuln}, --aggregate {file,vuln} + aggregate output by vulnerability (default) or by + filename + -n CONTEXT_LINES, --number CONTEXT_LINES + maximum number of code lines to output for each issue + -c CONFIG_FILE, --configfile CONFIG_FILE + optional config file to use for selecting plugins and + overriding defaults + -p PROFILE, --profile PROFILE + profile to use (defaults to executing all tests) + -t TESTS, --tests TESTS + comma-separated list of test IDs to run + -s SKIPS, --skip SKIPS + comma-separated list of test IDs to skip + -l, --level report only issues of a given severity level or higher + (-l for LOW, -ll for MEDIUM, -lll for HIGH) + -i, --confidence report only issues of a given confidence level or + higher (-i for LOW, -ii for MEDIUM, -iii for HIGH) + -f {csv,custom,html,json,screen,txt,xml,yaml}, --format {csv,custom,html,json,screen,txt,xml,yaml} + specify output format + --msg-template MSG_TEMPLATE + specify output message template (only usable with + --format custom), see CUSTOM FORMAT section for list + of available values + -o [OUTPUT_FILE], 
--output [OUTPUT_FILE] + write report to filename + -v, --verbose output extra information like excluded and included + files + -d, --debug turn on debug mode + --ignore-nosec do not skip lines with # nosec comments + -x EXCLUDED_PATHS, --exclude EXCLUDED_PATHS + comma-separated list of paths to exclude from scan + (note that these are in addition to the excluded paths + provided in the config file) + -b BASELINE, --baseline BASELINE + path of a baseline report to compare against (only + JSON-formatted files are accepted) + --ini INI_PATH path to a .bandit file that supplies command line + arguments + --version show program's version number and exit + + CUSTOM FORMATTING + ----------------- + + Available tags: + + {abspath}, {relpath}, {line}, {test_id}, + {severity}, {msg}, {confidence}, {range} + + Example usage: + + Default template: + bandit -r examples/ --format custom --msg-template \ + "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}" + + Provides same output as: + bandit -r examples/ --format custom + + Tags can also be formatted in python string.format() style: + bandit -r examples/ --format custom --msg-template \ + "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}" + + See python documentation for more information about formatting style: + https://docs.python.org/3.4/library/string.html + + The following tests were discovered and loaded: + ----------------------------------------------- + + B101 assert_used + B102 exec_used + B103 set_bad_file_permissions + B104 hardcoded_bind_all_interfaces + B105 hardcoded_password_string + B106 hardcoded_password_funcarg + B107 hardcoded_password_default + B108 hardcoded_tmp_directory + B110 try_except_pass + B112 try_except_continue + B201 flask_debug_true + B301 pickle + B302 marshal + B303 md5 + B304 ciphers + B305 cipher_modes + B306 mktemp_q + B307 eval + B308 mark_safe + B309 httpsconnection + B310 urllib_urlopen + B311 random + B312 telnetlib + B313 xml_bad_cElementTree + B314 xml_bad_ElementTree + B315 xml_bad_expatreader + B316 xml_bad_expatbuilder + B317 xml_bad_sax + B318 xml_bad_minidom + B319 xml_bad_pulldom + B320 xml_bad_etree + B321 ftplib + B322 input + B323 unverified_context + B324 hashlib_new_insecure_functions + B325 tempnam + B401 import_telnetlib + B402 import_ftplib + B403 import_pickle + B404 import_subprocess + B405 import_xml_etree + B406 import_xml_sax + B407 import_xml_expat + B408 import_xml_minidom + B409 import_xml_pulldom + B410 import_lxml + B411 import_xmlrpclib + B412 import_httpoxy + B413 import_pycrypto + B414 import_pycryptodome + B501 request_with_no_cert_validation + B502 ssl_with_bad_version + B503 ssl_with_bad_defaults + B504 ssl_with_no_version + B505 weak_cryptographic_key + B506 yaml_load + B507 ssh_no_host_key_verification + B601 paramiko_calls + B602 subprocess_popen_with_shell_equals_true + B603 subprocess_without_shell_equals_true + B604 any_other_function_with_shell_equals_true + B605 start_process_with_a_shell + B606 start_process_with_no_shell + B607 start_process_with_partial_path + B608 hardcoded_sql_expressions + B609 linux_commands_wildcard_injection + B610 django_extra_used + B611 django_rawsql_used + B701 jinja2_autoescape_false + B702 use_of_mako_templates + B703 django_mark_safe + + +Configuration +------------- +An optional config file may be supplied and may include: + - lists of tests which should or shouldn't be run + - exclude_dirs - sections of the path, that if matched, will be excluded from + scanning + - overridden plugin settings - may provide different 
settings for some + plugins + +Per Project Command Line Args +----------------------------- +Projects may include a `.bandit` file that specifies command line arguments +that should be supplied for that project. The currently supported arguments +are: + + - targets: comma separated list of target dirs/files to run bandit on + - exclude: comma separated list of excluded paths + - skips: comma separated list of tests to skip + - tests: comma separated list of tests to run + +To use this, put a .bandit file in your project's directory. For example: + +:: + + [bandit] + exclude: /test + +:: + + [bandit] + tests: B101,B102,B301 + + +Exclusions +---------- +In the event that a line of code triggers a Bandit issue, but that the line +has been reviewed and the issue is a false positive or acceptable for some +other reason, the line can be marked with a ``# nosec`` and any results +associated with it will not be reported. + +For example, although this line may cause Bandit to report a potential +security issue, it will not be reported:: + + self.process = subprocess.Popen('/bin/echo', shell=True) # nosec + + +Vulnerability Tests +------------------- +Vulnerability tests or "plugins" are defined in files in the plugins directory. + +Tests are written in Python and are autodiscovered from the plugins directory. +Each test can examine one or more type of Python statements. Tests are marked +with the types of Python statements they examine (for example: function call, +string, import, etc). + +Tests are executed by the ``BanditNodeVisitor`` object as it visits each node +in the AST. + +Test results are maintained in the ``BanditResultStore`` and aggregated for +output at the completion of a test run. + + +Writing Tests +------------- +To write a test: + - Identify a vulnerability to build a test for, and create a new file in + examples/ that contains one or more cases of that vulnerability. + - Consider the vulnerability you're testing for, mark the function with one + or more of the appropriate decorators: + - @checks('Call') + - @checks('Import', 'ImportFrom') + - @checks('Str') + - Create a new Python source file to contain your test, you can reference + existing tests for examples. + - The function that you create should take a parameter "context" which is + an instance of the context class you can query for information about the + current element being examined. You can also get the raw AST node for + more advanced use cases. Please see the context.py file for more. + - Extend your Bandit configuration file as needed to support your new test. + - Execute Bandit against the test file you defined in examples/ and ensure + that it detects the vulnerability. Consider variations on how this + vulnerability might present itself and extend the example file and the test + function accordingly. + + +Extending Bandit +---------------- + +Bandit allows users to write and register extensions for checks and formatters. +Bandit will load plugins from two entry-points: + +- `bandit.formatters` +- `bandit.plugins` + +Formatters need to accept 4 things: + +- `result_store`: An instance of `bandit.core.BanditResultStore` +- `file_list`: The list of files which were inspected in the scope +- `scores`: The scores awarded to each file in the scope +- `excluded_files`: The list of files that were excluded from the scope + +Plugins tend to take advantage of the `bandit.checks` decorator which allows +the author to register a check for a particular type of AST node. 
For example + +:: + + @bandit.checks('Call') + def prohibit_unsafe_deserialization(context): + if 'unsafe_load' in context.call_function_name_qual: + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="Unsafe deserialization detected." + ) + +To register your plugin, you have two options: + +1. If you're using setuptools directly, add something like the following to + your ``setup`` call:: + + # If you have an imaginary bson formatter in the bandit_bson module + # and a function called `formatter`. + entry_points={'bandit.formatters': ['bson = bandit_bson:formatter']} + # Or a check for using mako templates in bandit_mako that + entry_points={'bandit.plugins': ['mako = bandit_mako']} + +2. If you're using pbr, add something like the following to your `setup.cfg` + file:: + + [entry_points] + bandit.formatters = + bson = bandit_bson:formatter + bandit.plugins = + mako = bandit_mako + +Contributing +------------ +Contributions to Bandit are always welcome! + +The best way to get started with Bandit is to grab the source:: + + git clone https://github.com/PyCQA/bandit.git + +You can test any changes with tox:: + + pip install tox + tox -e pep8 + tox -e py27 + tox -e py35 + tox -e docs + tox -e cover + +Please make PR requests using your own branch, and not master:: + + git checkout -b mychange + git push origin mychange + +Reporting Bugs +-------------- +Bugs should be reported on github. To file a bug against Bandit, visit: +https://github.com/PyCQA/bandit/issues + +Under Which Version of Python Should I Install Bandit? +------------------------------------------------------ +The answer to this question depends on the project(s) you will be running +Bandit against. If your project is only compatible with Python 2.7, you +should install Bandit to run under Python 2.7. If your project is only +compatible with Python 3.5, then use 3.5 respectively. If your project supports +both, you *could* run Bandit with both versions but you don't have to. + +Bandit uses the `ast` module from Python's standard library in order to +analyze your Python code. The `ast` module is only able to parse Python code +that is valid in the version of the interpreter from which it is imported. In +other words, if you try to use Python 2.7's `ast` module to parse code written +for 3.5 that uses, for example, `yield from` with asyncio, then you'll have +syntax errors that will prevent Bandit from working properly. Alternatively, +if you are relying on 2.7's octal notation of `0777` then you'll have a syntax +error if you run Bandit on 3.x. 
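The *Writing Tests* section above asks for a file in ``examples/`` that actually triggers the new check; for the ``prohibit_unsafe_deserialization`` plugin shown under *Extending Bandit*, a matching example could look like this (``examples/unsafe_yaml.py`` is an illustrative name, and ``yaml.unsafe_load`` assumes PyYAML >= 5.1)::

    # examples/unsafe_yaml.py -- intentionally vulnerable input for the check
    import yaml

    def load_config(raw):
        # 'unsafe_load' appears in the qualified call name, which is what
        # the plugin's context.call_function_name_qual test matches on.
        return yaml.unsafe_load(raw)

Running ``bandit examples/unsafe_yaml.py`` with the plugin registered should then report a single HIGH-severity issue on the ``yaml.unsafe_load`` line.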
+ + +References +========== + +Bandit docs: https://bandit.readthedocs.io/en/latest/ + +Python AST module documentation: https://docs.python.org/2/library/ast.html + +Green Tree Snakes - the missing Python AST docs: +https://greentreesnakes.readthedocs.org/en/latest/ + +Documentation of the various types of AST nodes that Bandit currently covers +or could be extended to cover: +https://greentreesnakes.readthedocs.org/en/latest/nodes.html + + + diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/RECORD b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/RECORD new file mode 100644 index 0000000..9ff2581 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/RECORD @@ -0,0 +1,130 @@ +../../../bin/bandit,sha256=104PWCu_-KCIB55nCkgEcwG3GmFncbcuKSUpCQL9kg4,269 +../../../bin/bandit-baseline,sha256=A_Co1__iRCL58cQKsqlSvxXhlIdHWDk-R5-SnI6nkd0,273 +../../../bin/bandit-config-generator,sha256=Hv8hGDst-2fCSlxcchM_K7bWWMjOsw4lqzfKDfiSRh4,281 +bandit-1.5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +bandit-1.5.1.dist-info/METADATA,sha256=77zxQ-vXQNa1vq_h1wyIqnx5F4Ja_i55o6KP1rAvEUU,16686 +bandit-1.5.1.dist-info/RECORD,, +bandit-1.5.1.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +bandit-1.5.1.dist-info/entry_points.txt,sha256=Yv0HmDCLhsqWQ2-Hr5vkyTuHd6k0obe3_53wnIQi0zI,3384 +bandit-1.5.1.dist-info/pbr.json,sha256=nSZiV4vxHdaFvBn4mjRTK5N74aSZVZtoP9B7f8V27Zg,47 +bandit-1.5.1.dist-info/top_level.txt,sha256=SVJ-U-In_cpe2PQq5ZOlxjEnlAV5MfjvfFuGzg8wgdg,7 +bandit/__init__.py,sha256=BoWeY00rKrG-kKxrEAbq0O5Uan19b_c-OvTiu2kWIjw,1175 +bandit/__init__.pyc,, +bandit/__main__.py,sha256=6K6Vee3y7MPC6ywhBQaTZrw26xJhnxEz_L2ayqgLa4I,62 +bandit/__main__.pyc,, +bandit/blacklists/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bandit/blacklists/__init__.pyc,, +bandit/blacklists/calls.py,sha256=Xzdo3kj31zKvCNuSL614ogPjJ7vp_lVKpfEu89xxO34,26560 +bandit/blacklists/calls.pyc,, +bandit/blacklists/imports.py,sha256=HbuFwutXjHL4YSQtOPbPaOinJ7RxX5gnYHsPHBW2BgY,15796 +bandit/blacklists/imports.pyc,, +bandit/blacklists/utils.py,sha256=O_AS3BPDHMeFRMAUGgqSHtOPz63TxVlmUCcZBVTI3t0,870 +bandit/blacklists/utils.pyc,, +bandit/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bandit/cli/__init__.pyc,, +bandit/cli/baseline.py,sha256=MhdDLYxDt65ZCEC-JfTcxxGqWlbOGGguYSBWQIUtk8k,7645 +bandit/cli/baseline.pyc,, +bandit/cli/config_generator.py,sha256=ftBdIrXxzZzSvJWtP7m1qn6ejqQu2UCeG3k9m-mtu4I,6323 +bandit/cli/config_generator.pyc,, +bandit/cli/main.py,sha256=phN5rVIb0hu3_fP6MRv8L2bqQsotxGhbr4zyBTQ510M,14610 +bandit/cli/main.pyc,, +bandit/core/__init__.py,sha256=t3kUM7_udVcitNfjCOEIG9w8iqcZ4VLpR6GsdH-zL6o,1089 +bandit/core/__init__.pyc,, +bandit/core/blacklisting.py,sha256=tzELQvilIIRr9cafqgFzfY0IjiVqkYbHdfQQf6IQ2s0,3045 +bandit/core/blacklisting.pyc,, +bandit/core/config.py,sha256=vIVk923NQdw7-mXlQeyF5eDd7ppto_LxRxRPMLF83qc,9488 +bandit/core/config.pyc,, +bandit/core/constants.py,sha256=pJ9kCKhQXYMnHiNKhKY5Ip_4bH_4oXXIBG6hkysrsQo,1639 +bandit/core/constants.pyc,, +bandit/core/context.py,sha256=qJRYDHlxfPe-jdo3Fm8ae5zWGL3CG6hpg62UUhSRvZE,11612 +bandit/core/context.pyc,, +bandit/core/docs_utils.py,sha256=A-ybwi1ddgX-Qc7JFYQ7zo6C6bXSCSKJjcp92xsu--k,2114 +bandit/core/docs_utils.pyc,, +bandit/core/extension_loader.py,sha256=hQqRi6_HBcAfdJxub6z6L8VqEXubhhdps1VGLwtyqoM,4481 +bandit/core/extension_loader.pyc,, +bandit/core/issue.py,sha256=BscF0Uk4u3-HmmanSLNFKYdwRevwIvrGK51Vq9fWoI0,4850 +bandit/core/issue.pyc,, 
+bandit/core/manager.py,sha256=seYETNpM-Pi9ihMxPG3ggwA45WK3euXIR0EGqLgHGh4,15313 +bandit/core/manager.pyc,, +bandit/core/meta_ast.py,sha256=3s4EI5dzUMpf5BMsycVNcU9Z3urZtZvAa5pTbWDlf7E,1659 +bandit/core/meta_ast.pyc,, +bandit/core/metrics.py,sha256=R-3cfJJqzzndiGaTeLdi6QWobQfYtjhtDxRnAi_6T80,3609 +bandit/core/metrics.pyc,, +bandit/core/node_visitor.py,sha256=S7kpy4n3_Q4sR8foFaUhQh5fV7f8P83FbE_QXwxtE54,10361 +bandit/core/node_visitor.pyc,, +bandit/core/test_properties.py,sha256=C5Trw_91ymVhRDngPkUFRjhL3U24MabhuhzcVYo4RTo,2518 +bandit/core/test_properties.pyc,, +bandit/core/test_set.py,sha256=5zEfjvAzGRHYwIxrRaXMG3J2dNGVYsUf80UCvUTfS2E,4569 +bandit/core/test_set.pyc,, +bandit/core/tester.py,sha256=1Ir-WQLbnpuf6zdR9K3dbf_s9y-kimzohsEvMlukwHM,4119 +bandit/core/tester.pyc,, +bandit/core/utils.py,sha256=BNIr3TW18LgkJw_UDWD2g9WRzr71zs_DwsR4441ZYvg,10957 +bandit/core/utils.pyc,, +bandit/formatters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bandit/formatters/__init__.pyc,, +bandit/formatters/csv.py,sha256=IJhqMszLEP1Vbr_PN9i6NCswYmqsC50K4q1wLY_SUDM,2716 +bandit/formatters/csv.pyc,, +bandit/formatters/custom.py,sha256=7uZOdlmkQcKgWD0a47_RmMoerxL_kZoZ_bftlCePzeE,5841 +bandit/formatters/custom.pyc,, +bandit/formatters/html.py,sha256=xnd5rB8JJKVsY1mzLkkU9Zp_hqZZ1zBB6BEc_SD6-lY,9137 +bandit/formatters/html.pyc,, +bandit/formatters/json.py,sha256=4a0iL2ETzQWYAvaWM_Wwv47diE4SIsSX9zZx9HYvTiI,4643 +bandit/formatters/json.pyc,, +bandit/formatters/screen.py,sha256=hpve2zCl196aW9dfQkkwqcoIQL07XznzLmYA1z1SeDk,6172 +bandit/formatters/screen.pyc,, +bandit/formatters/text.py,sha256=CfVZvSyqDVF1_4H8tSPPmaZJ6OP3KIVp-Aa2yzncBK0,5764 +bandit/formatters/text.pyc,, +bandit/formatters/utils.py,sha256=I5bz-jR51v0maJTvZqIQO9BGTxylJVvMhhu-7yECra0,1589 +bandit/formatters/utils.pyc,, +bandit/formatters/xml.py,sha256=IX-pta4yKMUwJvytXN6FcrC1GuwyLL0Y_8l2cpJtI8k,3156 +bandit/formatters/xml.pyc,, +bandit/formatters/yaml.py,sha256=iMA6AFDFZ_2QBC9ppD5d4ysN3eVSqxHetC6MZkc6XBU,3977 +bandit/formatters/yaml.pyc,, +bandit/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bandit/plugins/__init__.pyc,, +bandit/plugins/app_debug.py,sha256=FXPfM5qIuhI1uDLeziwaaXwCqUVdRVXo7OIKSqMQj3M,2538 +bandit/plugins/app_debug.pyc,, +bandit/plugins/asserts.py,sha256=o3ugmGqXL_98fVDgLZSh4UpJ0IFSce-hiitYLl-Kf40,2095 +bandit/plugins/asserts.pyc,, +bandit/plugins/crypto_request_no_cert_validation.py,sha256=zFay8uLWftFoNTzT8IAEB0gk7b26r5HLR21cXWH5UaU,2675 +bandit/plugins/crypto_request_no_cert_validation.pyc,, +bandit/plugins/django_sql_injection.py,sha256=VboocL9oP-RZ71Peedv-0Cii5hJzR4LyoTOtoecqNGs,3712 +bandit/plugins/django_sql_injection.pyc,, +bandit/plugins/django_xss.py,sha256=fR9JQ1NoZ_Qn2SYz5cRBCzRrBfSkoxIu7CNo67Mavy8,10937 +bandit/plugins/django_xss.pyc,, +bandit/plugins/exec.py,sha256=mmV8cQhIwEKgAfan2Ze3dTTmRy5csafqgHR726RAadw,1811 +bandit/plugins/exec.pyc,, +bandit/plugins/general_bad_file_permissions.py,sha256=JTGal1m1LdN-UrXJrpxVt3PeEpMyzmmSjUqP3yPr1-I,3250 +bandit/plugins/general_bad_file_permissions.pyc,, +bandit/plugins/general_bind_all_interfaces.py,sha256=95BRu5MJO3VruW45ic3Ja90QJdZAcSXSh4Bq8MBHfxk,1792 +bandit/plugins/general_bind_all_interfaces.pyc,, +bandit/plugins/general_hardcoded_password.py,sha256=6fw3jfc_HMtlSkXKr1TI5MSz6UNywgl4lIy-AUFudWs,6396 +bandit/plugins/general_hardcoded_password.pyc,, +bandit/plugins/general_hardcoded_tmp.py,sha256=0Qn-xNv9GWm5VXBvdRScwCCBIW4QGdh5YOnGonwVKro,2553 +bandit/plugins/general_hardcoded_tmp.pyc,, 
+bandit/plugins/hashlib_new_insecure_functions.py,sha256=LdHnVXFZ2TdLLU7SerW30XyYxW7HeEtOAy-Uxxv-Q-8,2371 +bandit/plugins/hashlib_new_insecure_functions.pyc,, +bandit/plugins/injection_paramiko.py,sha256=F3dmc6S2bW9KRydJEiNBJIXLjRL1nO-aOPDOYWxhXXY,2643 +bandit/plugins/injection_paramiko.pyc,, +bandit/plugins/injection_shell.py,sha256=F02FWh8jR7bxMVEBQNt4sz4LYRny0cfqNeIqhVZBctg,24971 +bandit/plugins/injection_shell.pyc,, +bandit/plugins/injection_sql.py,sha256=joQaczwfUW9CC0s1bouy4Yd5iPjYN4ghJxgCsE1i338,3677 +bandit/plugins/injection_sql.pyc,, +bandit/plugins/injection_wildcard.py,sha256=nrodof4DKcfR6lvm3Yh0VxWwOX9aaKRKwkWAJ4M-MbA,5242 +bandit/plugins/injection_wildcard.pyc,, +bandit/plugins/insecure_ssl_tls.py,sha256=kdkCmm9UEvSxaKkzsMww-HZyzqXl_N_v9-ISBdA3NHk,10153 +bandit/plugins/insecure_ssl_tls.pyc,, +bandit/plugins/jinja2_templates.py,sha256=WeIdyCYMqmkdL8XI4xrCGrpyItofWXGyPGdhN8vZlDc,5755 +bandit/plugins/jinja2_templates.pyc,, +bandit/plugins/mako_templates.py,sha256=Cl-6Vmvpshx1kq-cwLy1LmMVA3rK3pTYsQ_-9klmLlk,2904 +bandit/plugins/mako_templates.pyc,, +bandit/plugins/ssh_no_host_key_verification.py,sha256=pniaO6sIJDxnV5pVrq4VmG7WmDz6WDIjp5K7o-xskbo,2424 +bandit/plugins/ssh_no_host_key_verification.pyc,, +bandit/plugins/try_except_continue.py,sha256=bNeh6vPLYGWxj6AsZSsXPFci64WVHyfh2rSUbZhMB08,3294 +bandit/plugins/try_except_continue.pyc,, +bandit/plugins/try_except_pass.py,sha256=MsFF49zxF_Wd22MR1TukRzvVPDnf1lHgQGg_-HcE2Go,3162 +bandit/plugins/try_except_pass.pyc,, +bandit/plugins/weak_cryptographic_key.py,sha256=yuxz3L9rqcITGSuvqYAW6U6UPFZdBSy66Ra179-Jwng,5146 +bandit/plugins/weak_cryptographic_key.pyc,, +bandit/plugins/yaml_load.py,sha256=VREUsWCB2BnkFqF0LpgZNyqlmUDIxyvUYJTDBoDUn5A,2452 +bandit/plugins/yaml_load.pyc,, diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/WHEEL b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/WHEEL new file mode 100644 index 0000000..1316c41 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/entry_points.txt b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/entry_points.txt new file mode 100644 index 0000000..892afb3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/entry_points.txt @@ -0,0 +1,54 @@ +[bandit.blacklists] +calls = bandit.blacklists.calls:gen_blacklist +imports = bandit.blacklists.imports:gen_blacklist + +[bandit.formatters] +csv = bandit.formatters.csv:report +custom = bandit.formatters.custom:report +html = bandit.formatters.html:report +json = bandit.formatters.json:report +screen = bandit.formatters.screen:report +txt = bandit.formatters.text:report +xml = bandit.formatters.xml:report +yaml = bandit.formatters.yaml:report + +[bandit.plugins] +any_other_function_with_shell_equals_true = bandit.plugins.injection_shell:any_other_function_with_shell_equals_true +assert_used = bandit.plugins.asserts:assert_used +django_extra_used = bandit.plugins.django_sql_injection:django_extra_used +django_mark_safe = bandit.plugins.django_xss:django_mark_safe +django_rawsql_used = bandit.plugins.django_sql_injection:django_rawsql_used +exec_used = bandit.plugins.exec:exec_used +flask_debug_true = bandit.plugins.app_debug:flask_debug_true +hardcoded_bind_all_interfaces = bandit.plugins.general_bind_all_interfaces:hardcoded_bind_all_interfaces 
+hardcoded_password_default = bandit.plugins.general_hardcoded_password:hardcoded_password_default +hardcoded_password_funcarg = bandit.plugins.general_hardcoded_password:hardcoded_password_funcarg +hardcoded_password_string = bandit.plugins.general_hardcoded_password:hardcoded_password_string +hardcoded_sql_expressions = bandit.plugins.injection_sql:hardcoded_sql_expressions +hardcoded_tmp_directory = bandit.plugins.general_hardcoded_tmp:hardcoded_tmp_directory +hashlib_new_insecure_functions = bandit.plugins.hashlib_new_insecure_functions:hashlib_new +jinja2_autoescape_false = bandit.plugins.jinja2_templates:jinja2_autoescape_false +linux_commands_wildcard_injection = bandit.plugins.injection_wildcard:linux_commands_wildcard_injection +paramiko_calls = bandit.plugins.injection_paramiko:paramiko_calls +request_with_no_cert_validation = bandit.plugins.crypto_request_no_cert_validation:request_with_no_cert_validation +set_bad_file_permissions = bandit.plugins.general_bad_file_permissions:set_bad_file_permissions +ssh_no_host_key_verification = bandit.plugins.ssh_no_host_key_verification:ssh_no_host_key_verification +ssl_with_bad_defaults = bandit.plugins.insecure_ssl_tls:ssl_with_bad_defaults +ssl_with_bad_version = bandit.plugins.insecure_ssl_tls:ssl_with_bad_version +ssl_with_no_version = bandit.plugins.insecure_ssl_tls:ssl_with_no_version +start_process_with_a_shell = bandit.plugins.injection_shell:start_process_with_a_shell +start_process_with_no_shell = bandit.plugins.injection_shell:start_process_with_no_shell +start_process_with_partial_path = bandit.plugins.injection_shell:start_process_with_partial_path +subprocess_popen_with_shell_equals_true = bandit.plugins.injection_shell:subprocess_popen_with_shell_equals_true +subprocess_without_shell_equals_true = bandit.plugins.injection_shell:subprocess_without_shell_equals_true +try_except_continue = bandit.plugins.try_except_continue:try_except_continue +try_except_pass = bandit.plugins.try_except_pass:try_except_pass +use_of_mako_templates = bandit.plugins.mako_templates:use_of_mako_templates +weak_cryptographic_key = bandit.plugins.weak_cryptographic_key:weak_cryptographic_key +yaml_load = bandit.plugins.yaml_load:yaml_load + +[console_scripts] +bandit = bandit.cli.main:main +bandit-baseline = bandit.cli.baseline:main +bandit-config-generator = bandit.cli.config_generator:main + diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/pbr.json b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/pbr.json new file mode 100644 index 0000000..697197a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/pbr.json @@ -0,0 +1 @@ +{"git_version": "2421df6", "is_release": false} \ No newline at end of file diff --git a/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/top_level.txt b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/top_level.txt new file mode 100644 index 0000000..4f97a52 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit-1.5.1.dist-info/top_level.txt @@ -0,0 +1 @@ +bandit diff --git a/env_27/lib/python2.7/site-packages/bandit/__init__.py b/env_27/lib/python2.7/site-packages/bandit/__init__.py new file mode 100644 index 0000000..53d6d23 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/__init__.py @@ -0,0 +1,31 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +from bandit.core import config # noqa +from bandit.core import context # noqa +from bandit.core import manager # noqa +from bandit.core import meta_ast # noqa +from bandit.core import node_visitor # noqa +from bandit.core import test_set # noqa +from bandit.core import tester # noqa +from bandit.core import utils # noqa +from bandit.core.constants import * # noqa +from bandit.core.issue import * # noqa +from bandit.core.test_properties import * # noqa + +__version__ = pbr.version.VersionInfo('bandit').version_string() diff --git a/env_27/lib/python2.7/site-packages/bandit/__main__.py b/env_27/lib/python2.7/site-packages/bandit/__main__.py new file mode 100644 index 0000000..8c36edf --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/__main__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from bandit.cli import main +main.main() diff --git a/env_27/lib/python2.7/site-packages/bandit/blacklists/__init__.py b/env_27/lib/python2.7/site-packages/bandit/blacklists/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env_27/lib/python2.7/site-packages/bandit/blacklists/calls.py b/env_27/lib/python2.7/site-packages/bandit/blacklists/calls.py new file mode 100644 index 0000000..85fb678 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/blacklists/calls.py @@ -0,0 +1,577 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +==================================================== +Blacklist various Python calls known to be dangerous +==================================================== + +This blacklist data checks for a number of Python calls known to have possible +security implications. The following blacklist tests are run against any +function calls encountered in the scanned code base, triggered by encountering +ast.Call nodes. + +B301: pickle +------------ + +Pickle and modules that wrap it can be unsafe when used to +deserialize untrusted data, a possible security issue.
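A minimal sketch of the kind of call this test reports (illustrative only, not part of the upstream docstring; the function name is hypothetical)::

    import pickle

    def load_session(raw_bytes):
        # Deserializing attacker-controlled bytes can execute arbitrary
        # code, so bandit reports this call as B301.
        return pickle.loads(raw_bytes)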
+ ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B301 | pickle | - pickle.loads | Medium | +| | | - pickle.load | | +| | | - pickle.Unpickler | | +| | | - cPickle.loads | | +| | | - cPickle.load | | +| | | - cPickle.Unpickler | | +| | | - dill.loads | | +| | | - dill.load | | +| | | - dill.Unpickler | | ++------+---------------------+------------------------------------+-----------+ + +B302: marshal +------------- + +Deserialization with the marshal module is possibly dangerous. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B302 | marshal | - marshal.load | Medium | +| | | - marshal.loads | | ++------+---------------------+------------------------------------+-----------+ + +B303: md5 +--------- + +Use of insecure MD2, MD4, MD5, or SHA1 hash function. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B303 | md5 | - hashlib.md5 | Medium | +| | | - hashlib.sha1 | | +| | | - Crypto.Hash.MD2.new | | +| | | - Crypto.Hash.MD4.new | | +| | | - Crypto.Hash.MD5.new | | +| | | - Crypto.Hash.SHA.new | | +| | | - Cryptodome.Hash.MD2.new | | +| | | - Cryptodome.Hash.MD4.new | | +| | | - Cryptodome.Hash.MD5.new | | +| | | - Cryptodome.Hash.SHA.new | | +| | | - cryptography.hazmat.primitives | | +| | | .hashes.MD5 | | +| | | - cryptography.hazmat.primitives | | +| | | .hashes.SHA1 | | ++------+---------------------+------------------------------------+-----------+ + +B304 - B305: ciphers and modes +------------------------------ + +Use of insecure cipher or cipher mode. Replace with a known secure cipher such +as AES. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B304 | ciphers | - Crypto.Cipher.ARC2.new | High | +| | | - Crypto.Cipher.ARC4.new | | +| | | - Crypto.Cipher.Blowfish.new | | +| | | - Crypto.Cipher.DES.new | | +| | | - Crypto.Cipher.XOR.new | | +| | | - Cryptodome.Cipher.ARC2.new | | +| | | - Cryptodome.Cipher.ARC4.new | | +| | | - Cryptodome.Cipher.Blowfish.new | | +| | | - Cryptodome.Cipher.DES.new | | +| | | - Cryptodome.Cipher.XOR.new | | +| | | - cryptography.hazmat.primitives | | +| | | .ciphers.algorithms.ARC4 | | +| | | - cryptography.hazmat.primitives | | +| | | .ciphers.algorithms.Blowfish | | +| | | - cryptography.hazmat.primitives | | +| | | .ciphers.algorithms.IDEA | | ++------+---------------------+------------------------------------+-----------+ +| B305 | cipher_modes | - cryptography.hazmat.primitives | Medium | +| | | .ciphers.modes.ECB | | ++------+---------------------+------------------------------------+-----------+ + +B306: mktemp_q +-------------- + +Use of insecure and deprecated function (mktemp). 
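For illustration (an assumed example, not upstream text), the flagged call next to a safer stdlib replacement::

    import os
    import tempfile

    path = tempfile.mktemp()       # B306: the returned name can be raced
    fd, safe = tempfile.mkstemp()  # creates the file atomically instead
    os.close(fd)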
+ ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B306 | mktemp_q | - tempfile.mktemp | Medium | ++------+---------------------+------------------------------------+-----------+ + +B307: eval +---------- + +Use of possibly insecure function - consider using safer ast.literal_eval. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B307 | eval | - eval | Medium | ++------+---------------------+------------------------------------+-----------+ + +B308: mark_safe +--------------- + +Use of mark_safe() may expose cross-site scripting vulnerabilities and should +be reviewed. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B308 | mark_safe | - django.utils.safestring.mark_safe| Medium | ++------+---------------------+------------------------------------+-----------+ + +B309: httpsconnection +--------------------- + +Use of HTTPSConnection on older versions of Python prior to 2.7.9 and 3.4.3 +does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033 + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B309 | httpsconnection | - httplib.HTTPSConnection | Medium | +| | | - http.client.HTTPSConnection | | +| | | - six.moves.http_client | | +| | | .HTTPSConnection | | ++------+---------------------+------------------------------------+-----------+ + +B310: urllib_urlopen +-------------------- + +Audit url open for permitted schemes. Allowing use of 'file:' or custom +schemes is often unexpected. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B310 | urllib_urlopen | - urllib.urlopen | Medium | +| | | - urllib.request.urlopen | | +| | | - urllib.urlretrieve | | +| | | - urllib.request.urlretrieve | | +| | | - urllib.URLopener | | +| | | - urllib.request.URLopener | | +| | | - urllib.FancyURLopener | | +| | | - urllib.request.FancyURLopener | | +| | | - urllib2.urlopen | | +| | | - urllib2.Request | | +| | | - six.moves.urllib.request.urlopen | | +| | | - six.moves.urllib.request | | +| | | .urlretrieve | | +| | | - six.moves.urllib.request | | +| | | .URLopener | | +| | | - six.moves.urllib.request | | +| | | .FancyURLopener | | ++------+---------------------+------------------------------------+-----------+ + +B311: random +------------ + +Standard pseudo-random generators are not suitable for security/cryptographic +purposes.
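A short sketch of the distinction (an assumed example; the secrets module ships with Python 3.6+)::

    import random
    import secrets

    weak = random.randrange(2 ** 64)  # B311: predictable Mersenne Twister output
    strong = secrets.randbits(64)     # CSPRNG-backed alternative for tokens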
+ ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B311 | random | - random.random | Low | +| | | - random.randrange | | +| | | - random.randint | | +| | | - random.choice | | +| | | - random.uniform | | +| | | - random.triangular | | ++------+---------------------+------------------------------------+-----------+ + +B312: telnetlib +--------------- + +Telnet-related functions are being called. Telnet is considered insecure. Use +SSH or some other encrypted protocol. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B312 | telnetlib | - telnetlib.\* | High | ++------+---------------------+------------------------------------+-----------+ + +B313 - B320: XML +---------------- + +Most of this is based off of Christian Heimes' work on defusedxml: +https://pypi.org/project/defusedxml/#defusedxml-sax + +Using various XML methods to parse untrusted XML data is known to be vulnerable +to XML attacks. Methods should be replaced with their defusedxml equivalents. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B313 | xml_bad_cElementTree| - xml.etree.cElementTree.parse | Medium | +| | | - xml.etree.cElementTree.iterparse | | +| | | - xml.etree.cElementTree.fromstring| | +| | | - xml.etree.cElementTree.XMLParser | | ++------+---------------------+------------------------------------+-----------+ +| B314 | xml_bad_ElementTree | - xml.etree.ElementTree.parse | Medium | +| | | - xml.etree.ElementTree.iterparse | | +| | | - xml.etree.ElementTree.fromstring | | +| | | - xml.etree.ElementTree.XMLParser | | ++------+---------------------+------------------------------------+-----------+ +| B315 | xml_bad_expatreader | - xml.sax.expatreader.create_parser| Medium | ++------+---------------------+------------------------------------+-----------+ +| B316 | xml_bad_expatbuilder| - xml.dom.expatbuilder.parse | Medium | +| | | - xml.dom.expatbuilder.parseString | | ++------+---------------------+------------------------------------+-----------+ +| B317 | xml_bad_sax | - xml.sax.parse | Medium | +| | | - xml.sax.parseString | | +| | | - xml.sax.make_parser | | ++------+---------------------+------------------------------------+-----------+ +| B318 | xml_bad_minidom | - xml.dom.minidom.parse | Medium | +| | | - xml.dom.minidom.parseString | | ++------+---------------------+------------------------------------+-----------+ +| B319 | xml_bad_pulldom | - xml.dom.pulldom.parse | Medium | +| | | - xml.dom.pulldom.parseString | | ++------+---------------------+------------------------------------+-----------+ +| B320 | xml_bad_etree | - lxml.etree.parse | Medium | +| | | - lxml.etree.fromstring | | +| | | - lxml.etree.RestrictedElement | | +| | | - lxml.etree.GlobalParserTLS | | +| | | - lxml.etree.getDefaultParser | | +| | | - lxml.etree.check_docinfo | | ++------+---------------------+------------------------------------+-----------+ + +B321: ftplib +------------ + +FTP-related functions are being called. FTP is considered insecure. Use +SSH/SFTP/SCP or some other encrypted protocol.
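For example (illustrative only; the host name is hypothetical), any call into the module is reported::

    import ftplib

    # Credentials and file contents travel unencrypted; flagged as B321.
    conn = ftplib.FTP('ftp.example.com')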
+ ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B321 | ftplib | - ftplib.\* | High | ++------+---------------------+------------------------------------+-----------+ + +B322: input +------------ + +The input method in Python 2 will read from standard input, evaluate and +run the resulting string as python source code. This is similar, though in +many ways worse, than using eval. On Python 2, use raw_input instead; input +is safe in Python 3. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B322 | input | - input | High | ++------+---------------------+------------------------------------+-----------+ + +B323: unverified_context +------------------------ + +By default, Python will create a secure, verified ssl context for use in such +classes as HTTPSConnection. However, it still allows using an insecure +context via the _create_unverified_context that reverts to the previous +behavior that does not validate certificates or perform hostname checks. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B323 | unverified_context | - ssl._create_unverified_context | Medium | ++------+---------------------+------------------------------------+-----------+ + +B325: tempnam +-------------- + +Use of os.tempnam() and os.tmpnam() is vulnerable to symlink attacks. Consider +using tmpfile() instead. + +For further information: + https://docs.python.org/2.7/library/os.html#os.tempnam + https://bugs.python.org/issue17880 + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Calls | Severity | ++======+=====================+====================================+===========+ +| B325 | tempnam | - os.tempnam | Medium | +| | | - os.tmpnam | | ++------+---------------------+------------------------------------+-----------+ + +""" + +from bandit.blacklists import utils + + +def gen_blacklist(): + """Generate a list of items to blacklist. + + Methods of this type, "bandit.blacklist" plugins, are used to build a list + of items that bandit's built in blacklisting tests will use to trigger + issues. They replace the older blacklist* test plugins and allow + blacklisted items to have a unique bandit ID for filtering and profile + usage. + + :return: a dictionary mapping node types to a list of blacklist data + """ + + sets = [] + sets.append(utils.build_conf_dict( + 'pickle', 'B301', + ['pickle.loads', + 'pickle.load', + 'pickle.Unpickler', + 'cPickle.loads', + 'cPickle.load', + 'cPickle.Unpickler', + 'dill.loads', + 'dill.load', + 'dill.Unpickler'], + 'Pickle and modules that wrap it can be unsafe when used to ' + 'deserialize untrusted data, a possible security issue.' + )) + + sets.append(utils.build_conf_dict( + 'marshal', 'B302', ['marshal.load', 'marshal.loads'], + 'Deserialization with the marshal module is possibly dangerous.'
+ )) + + sets.append(utils.build_conf_dict( + 'md5', 'B303', + ['hashlib.md5', + 'hashlib.sha1', + 'Crypto.Hash.MD2.new', + 'Crypto.Hash.MD4.new', + 'Crypto.Hash.MD5.new', + 'Crypto.Hash.SHA.new', + 'Cryptodome.Hash.MD2.new', + 'Cryptodome.Hash.MD4.new', + 'Cryptodome.Hash.MD5.new', + 'Cryptodome.Hash.SHA.new', + 'cryptography.hazmat.primitives.hashes.MD5', + 'cryptography.hazmat.primitives.hashes.SHA1'], + 'Use of insecure MD2, MD4, MD5, or SHA1 hash function.' + )) + + sets.append(utils.build_conf_dict( + 'ciphers', 'B304', + ['Crypto.Cipher.ARC2.new', + 'Crypto.Cipher.ARC4.new', + 'Crypto.Cipher.Blowfish.new', + 'Crypto.Cipher.DES.new', + 'Crypto.Cipher.XOR.new', + 'Cryptodome.Cipher.ARC2.new', + 'Cryptodome.Cipher.ARC4.new', + 'Cryptodome.Cipher.Blowfish.new', + 'Cryptodome.Cipher.DES.new', + 'Cryptodome.Cipher.XOR.new', + 'cryptography.hazmat.primitives.ciphers.algorithms.ARC4', + 'cryptography.hazmat.primitives.ciphers.algorithms.Blowfish', + 'cryptography.hazmat.primitives.ciphers.algorithms.IDEA'], + 'Use of insecure cipher {name}. Replace with a known secure' + ' cipher such as AES.', + 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'cipher_modes', 'B305', + ['cryptography.hazmat.primitives.ciphers.modes.ECB'], + 'Use of insecure cipher mode {name}.' + )) + + sets.append(utils.build_conf_dict( + 'mktemp_q', 'B306', ['tempfile.mktemp'], + 'Use of insecure and deprecated function (mktemp).' + )) + + sets.append(utils.build_conf_dict( + 'eval', 'B307', ['eval'], + 'Use of possibly insecure function - consider using safer ' + 'ast.literal_eval.' + )) + + sets.append(utils.build_conf_dict( + 'mark_safe', 'B308', ['django.utils.safestring.mark_safe'], + 'Use of mark_safe() may expose cross-site scripting ' + 'vulnerabilities and should be reviewed.' + )) + + sets.append(utils.build_conf_dict( + 'httpsconnection', 'B309', + ['httplib.HTTPSConnection', + 'http.client.HTTPSConnection', + 'six.moves.http_client.HTTPSConnection'], + 'Use of HTTPSConnection on older versions of Python prior to 2.7.9 ' + 'and 3.4.3 does not provide security, see ' + 'https://wiki.openstack.org/wiki/OSSN/OSSN-0033' + )) + + sets.append(utils.build_conf_dict( + 'urllib_urlopen', 'B310', + ['urllib.urlopen', + 'urllib.request.urlopen', + 'urllib.urlretrieve', + 'urllib.request.urlretrieve', + 'urllib.URLopener', + 'urllib.request.URLopener', + 'urllib.FancyURLopener', + 'urllib.request.FancyURLopener', + 'urllib2.urlopen', + 'urllib2.Request', + 'six.moves.urllib.request.urlopen', + 'six.moves.urllib.request.urlretrieve', + 'six.moves.urllib.request.URLopener', + 'six.moves.urllib.request.FancyURLopener'], + 'Audit url open for permitted schemes. Allowing use of file:/ or ' + 'custom schemes is often unexpected.' + )) + + sets.append(utils.build_conf_dict( + 'random', 'B311', + ['random.random', + 'random.randrange', + 'random.randint', + 'random.choice', + 'random.uniform', + 'random.triangular'], + 'Standard pseudo-random generators are not suitable for ' + 'security/cryptographic purposes.', + 'LOW' + )) + + sets.append(utils.build_conf_dict( + 'telnetlib', 'B312', ['telnetlib.*'], + 'Telnet-related functions are being called. Telnet is considered ' + 'insecure. Use SSH or some other encrypted protocol.', + 'HIGH' + )) + + # Most of this is based off of Christian Heimes' work on defusedxml: + # https://pypi.org/project/defusedxml/#defusedxml-sax + + xml_msg = ('Using {name} to parse untrusted XML data is known to be ' 'vulnerable to XML attacks.
Replace {name} with its ' + 'defusedxml equivalent function or make sure ' + 'defusedxml.defuse_stdlib() is called') + + sets.append(utils.build_conf_dict( + 'xml_bad_cElementTree', 'B313', + ['xml.etree.cElementTree.parse', + 'xml.etree.cElementTree.iterparse', + 'xml.etree.cElementTree.fromstring', + 'xml.etree.cElementTree.XMLParser'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_ElementTree', 'B314', + ['xml.etree.ElementTree.parse', + 'xml.etree.ElementTree.iterparse', + 'xml.etree.ElementTree.fromstring', + 'xml.etree.ElementTree.XMLParser'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_expatreader', 'B315', ['xml.sax.expatreader.create_parser'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_expatbuilder', 'B316', + ['xml.dom.expatbuilder.parse', + 'xml.dom.expatbuilder.parseString'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_sax', 'B317', + ['xml.sax.parse', + 'xml.sax.parseString', + 'xml.sax.make_parser'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_minidom', 'B318', + ['xml.dom.minidom.parse', + 'xml.dom.minidom.parseString'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_pulldom', 'B319', + ['xml.dom.pulldom.parse', + 'xml.dom.pulldom.parseString'], + xml_msg + )) + + sets.append(utils.build_conf_dict( + 'xml_bad_etree', 'B320', + ['lxml.etree.parse', + 'lxml.etree.fromstring', + 'lxml.etree.RestrictedElement', + 'lxml.etree.GlobalParserTLS', + 'lxml.etree.getDefaultParser', + 'lxml.etree.check_docinfo'], + ('Using {name} to parse untrusted XML data is known to be ' + 'vulnerable to XML attacks. Replace {name} with its ' + 'defusedxml equivalent function.') + )) + + # end of XML tests + + sets.append(utils.build_conf_dict( + 'ftplib', 'B321', ['ftplib.*'], + 'FTP-related functions are being called. FTP is considered ' + 'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.', + 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'input', 'B322', ['input'], + 'The input method in Python 2 will read from standard input, ' + 'evaluate and run the resulting string as python source code. This ' + 'is similar, though in many ways worse, than using eval. On Python ' + '2, use raw_input instead; input is safe in Python 3.', + 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'unverified_context', 'B323', ['ssl._create_unverified_context'], + 'By default, Python will create a secure, verified ssl context for ' + 'use in such classes as HTTPSConnection. However, it still allows ' + 'using an insecure context via the _create_unverified_context that ' + 'reverts to the previous behavior that does not validate certificates ' + 'or perform hostname checks.' + )) + + # skipped B324 (used in bandit/plugins/hashlib_new_insecure_functions.py) + + sets.append(utils.build_conf_dict( + 'tempnam', 'B325', ['os.tempnam', 'os.tmpnam'], + 'Use of os.tempnam() and os.tmpnam() is vulnerable to symlink ' + 'attacks. Consider using tmpfile() instead.' + )) + + return {'Call': sets} diff --git a/env_27/lib/python2.7/site-packages/bandit/blacklists/imports.py b/env_27/lib/python2.7/site-packages/bandit/blacklists/imports.py new file mode 100644 index 0000000..aa4ca3e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/blacklists/imports.py @@ -0,0 +1,340 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2016 Hewlett-Packard Development Company, L.P.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +====================================================== +Blacklist various Python imports known to be dangerous +====================================================== + +This blacklist data checks for a number of Python modules known to have +possible security implications. The following blacklist tests are run against +any import statements or calls encountered in the scanned code base. + +Note that the XML rules listed here are mostly based off of Christian Heimes' +work on defusedxml: https://pypi.org/project/defusedxml/ + +B401: import_telnetlib +---------------------- + +A telnet-related module is being imported. Telnet is considered insecure. Use +SSH or some other encrypted protocol. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B401 | import_telnetlib | - telnetlib | high | ++------+---------------------+------------------------------------+-----------+ + +B402: import_ftplib +------------------- +An FTP-related module is being imported. FTP is considered insecure. Use +SSH/SFTP/SCP or some other encrypted protocol. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B402 | import_ftplib | - ftplib | high | ++------+---------------------+------------------------------------+-----------+ + +B403: import_pickle +------------------- + +Consider possible security implications associated with these modules. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B403 | import_pickle | - pickle | low | +| | | - cPickle | | +| | | - dill | | ++------+---------------------+------------------------------------+-----------+ + +B404: import_subprocess +----------------------- + +Consider possible security implications associated with these modules. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B404 | import_subprocess | - subprocess | low | ++------+---------------------+------------------------------------+-----------+ + + +B405: import_xml_etree +---------------------- + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package, +or make sure defusedxml.defuse_stdlib() is called.
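A sketch of the suggested mitigation, assuming the defusedxml package is installed::

    import defusedxml.ElementTree as ET  # drop-in replacement parser

    tree = ET.fromstring('<config><item/></config>')

    # or harden the stdlib parsers in place:
    from defusedxml import defuse_stdlib
    defuse_stdlib()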
+ ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B405 | import_xml_etree | - xml.etree.cElementTree | low | +| | | - xml.etree.ElementTree | | ++------+---------------------+------------------------------------+-----------+ + +B406: import_xml_sax +-------------------- + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package, +or make sure defusedxml.defuse_stdlib() is called. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B406 | import_xml_sax | - xml.sax | low | ++------+---------------------+------------------------------------+-----------+ + +B407: import_xml_expat +---------------------- + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package, +or make sure defusedxml.defuse_stdlib() is called. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B407 | import_xml_expat | - xml.dom.expatbuilder | low | ++------+---------------------+------------------------------------+-----------+ + +B408: import_xml_minidom +------------------------ + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package, +or make sure defusedxml.defuse_stdlib() is called. + + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B408 | import_xml_minidom | - xml.dom.minidom | low | ++------+---------------------+------------------------------------+-----------+ + +B409: import_xml_pulldom +------------------------ + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package, +or make sure defusedxml.defuse_stdlib() is called. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B409 | import_xml_pulldom | - xml.dom.pulldom | low | ++------+---------------------+------------------------------------+-----------+ + +B410: import_lxml +----------------- + +Using various methods to parse untrusted XML data is known to be vulnerable to +XML attacks. Replace vulnerable imports with the equivalent defusedxml package. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B410 | import_lxml | - lxml | low | ++------+---------------------+------------------------------------+-----------+ + +B411: import_xmlrpclib +---------------------- + +XMLRPC is particularly dangerous as it is also concerned with communicating +data over a network. 
Use the defusedxml.xmlrpc.monkey_patch() function to monkey-patch +xmlrpclib and mitigate remote XML attacks. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B411 | import_xmlrpclib | - xmlrpclib | high | ++------+---------------------+------------------------------------+-----------+ + +B412: import_httpoxy +-------------------- +httpoxy is a set of vulnerabilities that affect application code running in +CGI, or CGI-like environments. The use of CGI for web applications should be +avoided to prevent this class of attack. More details are available +at https://httpoxy.org/. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B412 | import_httpoxy | - wsgiref.handlers.CGIHandler | high | +| | | - twisted.web.twcgi.CGIScript | | ++------+---------------------+------------------------------------+-----------+ + +B413: import_pycrypto +--------------------- +The pycrypto library is known to have a publicly disclosed buffer overflow +vulnerability (https://github.com/dlitz/pycrypto/issues/176). It is no longer +actively maintained and has been deprecated in favor of the pyca/cryptography +library. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B413 | import_pycrypto | - Crypto.Cipher | high | +| | | - Crypto.Hash | | +| | | - Crypto.IO | | +| | | - Crypto.Protocol | | +| | | - Crypto.PublicKey | | +| | | - Crypto.Random | | +| | | - Crypto.Signature | | +| | | - Crypto.Util | | ++------+---------------------+------------------------------------+-----------+ + +B414: import_pycryptodome +------------------------- +pycryptodome is a direct fork of pycrypto that has not fully addressed +the issues inherent in PyCrypto. It seems to exist mainly as an API-compatible +continuation of pycrypto and should be deprecated in favor +of pyca/cryptography, which has more support among the Python community. + ++------+---------------------+------------------------------------+-----------+ +| ID | Name | Imports | Severity | ++======+=====================+====================================+===========+ +| B414 | import_pycryptodome | - Cryptodome.Cipher | high | +| | | - Cryptodome.Hash | | +| | | - Cryptodome.IO | | +| | | - Cryptodome.Protocol | | +| | | - Cryptodome.PublicKey | | +| | | - Cryptodome.Random | | +| | | - Cryptodome.Signature | | +| | | - Cryptodome.Util | | ++------+---------------------+------------------------------------+-----------+ + +""" + +from bandit.blacklists import utils + + +def gen_blacklist(): + """Generate a list of items to blacklist. + + Methods of this type, "bandit.blacklist" plugins, are used to build a list + of items that bandit's built in blacklisting tests will use to trigger + issues. They replace the older blacklist* test plugins and allow + blacklisted items to have a unique bandit ID for filtering and profile + usage. + + :return: a dictionary mapping node types to a list of blacklist data + """ + + sets = [] + sets.append(utils.build_conf_dict( + 'import_telnetlib', 'B401', ['telnetlib'], + 'A telnet-related module is being imported. Telnet is ' + 'considered insecure.
Use SSH or some other encrypted protocol.', + 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'import_ftplib', 'B402', ['ftplib'], + 'An FTP-related module is being imported. FTP is considered ' + 'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.', + 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'import_pickle', 'B403', ['pickle', 'cPickle', 'dill'], + 'Consider possible security implications associated with ' + '{name} module.', 'LOW' + )) + + sets.append(utils.build_conf_dict( + 'import_subprocess', 'B404', ['subprocess'], + 'Consider possible security implications associated with ' + '{name} module.', 'LOW' + )) + + # Most of this is based off of Christian Heimes' work on defusedxml: + # https://pypi.org/project/defusedxml/#defusedxml-sax + + xml_msg = ('Using {name} to parse untrusted XML data is known to be ' + 'vulnerable to XML attacks. Replace {name} with the equivalent ' + 'defusedxml package, or make sure defusedxml.defuse_stdlib() ' + 'is called.') + lxml_msg = ('Using {name} to parse untrusted XML data is known to be ' + 'vulnerable to XML attacks. Replace {name} with the ' + 'equivalent defusedxml package.') + + sets.append(utils.build_conf_dict( + 'import_xml_etree', 'B405', + ['xml.etree.cElementTree', 'xml.etree.ElementTree'], xml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_xml_sax', 'B406', ['xml.sax'], xml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_xml_expat', 'B407', ['xml.dom.expatbuilder'], xml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_xml_minidom', 'B408', ['xml.dom.minidom'], xml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_xml_pulldom', 'B409', ['xml.dom.pulldom'], xml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_lxml', 'B410', ['lxml'], lxml_msg, 'LOW')) + + sets.append(utils.build_conf_dict( + 'import_xmlrpclib', 'B411', ['xmlrpclib'], + 'Using {name} to parse untrusted XML data is known to be ' + 'vulnerable to XML attacks. Use defusedxml.xmlrpc.monkey_patch() ' + 'function to monkey-patch xmlrpclib and mitigate XML ' + 'vulnerabilities.', 'HIGH')) + + sets.append(utils.build_conf_dict( + 'import_httpoxy', 'B412', + ['wsgiref.handlers.CGIHandler', 'twisted.web.twcgi.CGIScript', + 'twisted.web.twcgi.CGIDirectory'], + 'Consider possible security implications associated with ' + '{name} module.', 'HIGH' + )) + + sets.append(utils.build_conf_dict( + 'import_pycrypto', 'B413', + ['Crypto.Cipher', + 'Crypto.Hash', + 'Crypto.IO', + 'Crypto.Protocol', + 'Crypto.PublicKey', + 'Crypto.Random', + 'Crypto.Signature', + 'Crypto.Util'], + 'The pyCrypto library and its module {name} are no longer actively ' + 'maintained and have been deprecated. ' + 'Consider using pyca/cryptography library.', 'HIGH')) + + sets.append(utils.build_conf_dict( + 'import_pycryptodome', 'B414', + ['Cryptodome.Cipher', + 'Cryptodome.Hash', + 'Cryptodome.IO', + 'Cryptodome.Protocol', + 'Cryptodome.PublicKey', + 'Cryptodome.Random', + 'Cryptodome.Signature', + 'Cryptodome.Util'], + 'The pycryptodome library is not considered a secure alternative ' + 'to pycrypto. '
+ 'Consider using pyca/cryptography library.', 'HIGH')) + + return {'Import': sets, 'ImportFrom': sets, 'Call': sets} diff --git a/env_27/lib/python2.7/site-packages/bandit/blacklists/utils.py b/env_27/lib/python2.7/site-packages/bandit/blacklists/utils.py new file mode 100644 index 0000000..f14e487 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/blacklists/utils.py @@ -0,0 +1,22 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def build_conf_dict(name, bid, qualnames, message, level='MEDIUM'): + """Build and return a blacklist configuration dict.""" + + return {'name': name, 'id': bid, 'message': message, + 'qualnames': qualnames, 'level': level} diff --git a/env_27/lib/python2.7/site-packages/bandit/cli/__init__.py b/env_27/lib/python2.7/site-packages/bandit/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env_27/lib/python2.7/site-packages/bandit/cli/baseline.py b/env_27/lib/python2.7/site-packages/bandit/cli/baseline.py new file mode 100644 index 0000000..fcc3d08 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/cli/baseline.py @@ -0,0 +1,224 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2015 Hewlett-Packard Enterprise +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# ############################################################################# +# Bandit Baseline is a tool that runs Bandit against a Git commit, and compares +# the current commit findings to the parent commit findings. + +# To do this it checks out the parent commit, runs Bandit (with any provided +# filters or profiles), checks out the current commit, runs Bandit, and then +# reports on any new findings. 
+# ############################################################################# + +import argparse +import contextlib +import logging +import os +import shutil +import subprocess +import sys +import tempfile + +import git + +bandit_args = sys.argv[1:] +baseline_tmp_file = '_bandit_baseline_run.json_' +current_commit = None +default_output_format = 'terminal' +LOG = logging.getLogger(__name__) +repo = None +report_basename = 'bandit_baseline_result' +valid_baseline_formats = ['txt', 'html', 'json'] + + +def main(): + # our cleanup function needs this and can't be passed arguments + global current_commit + global repo + + parent_commit = None + output_format = None + repo = None + report_fname = None + + init_logger() + + output_format, repo, report_fname = initialize() + + if not repo: + sys.exit(2) + + # #################### Find current and parent commits #################### + try: + commit = repo.commit() + current_commit = commit.hexsha + LOG.info('Got current commit: [%s]', commit.name_rev) + + commit = commit.parents[0] + parent_commit = commit.hexsha + LOG.info('Got parent commit: [%s]', commit.name_rev) + + except git.GitCommandError: + LOG.error("Unable to get current or parent commit") + sys.exit(2) + except IndexError: + LOG.error("Parent commit not available") + sys.exit(2) + + # #################### Run Bandit against both commits #################### + output_type = (['-f', 'txt'] if output_format == default_output_format + else ['-o', report_fname]) + + with baseline_setup() as t: + + bandit_tmpfile = "{}/{}".format(t, baseline_tmp_file) + + steps = [{'message': 'Getting Bandit baseline results', + 'commit': parent_commit, + 'args': bandit_args + ['-f', 'json', '-o', bandit_tmpfile]}, + + {'message': 'Comparing Bandit results to baseline', + 'commit': current_commit, + 'args': bandit_args + ['-b', bandit_tmpfile] + output_type}] + + return_code = None + + for step in steps: + repo.head.reset(commit=step['commit'], working_tree=True) + + LOG.info(step['message']) + + bandit_command = ['bandit'] + step['args'] + + try: + output = subprocess.check_output(bandit_command) + except subprocess.CalledProcessError as e: + output = e.output + return_code = e.returncode + else: + return_code = 0 + output = output.decode('utf-8') # subprocess returns bytes + + if return_code not in [0, 1]: + LOG.error("Error running command: %s\nOutput: %s\n", + bandit_args, output) + + # #################### Output and exit #################################### + # print output or display message about written report + if output_format == default_output_format: + print(output) + else: + LOG.info("Successfully wrote %s", report_fname) + + # exit with the code the last Bandit run returned + sys.exit(return_code) + + +# #################### Clean up before exit ################################### +@contextlib.contextmanager +def baseline_setup(): + d = tempfile.mkdtemp() + yield d + shutil.rmtree(d, True) + + if repo: + repo.head.reset(commit=current_commit, working_tree=True) + + +# #################### Setup logging ########################################## +def init_logger(): + LOG.handlers = [] + log_level = logging.INFO + log_format_string = "[%(levelname)7s ] %(message)s" + logging.captureWarnings(True) + LOG.setLevel(log_level) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter(log_format_string)) + LOG.addHandler(handler) + + +# #################### Perform initialization and validate assumptions ######## +def initialize(): + valid = True + + # 
#################### Parse Args ######################################### + parser = argparse.ArgumentParser( + description='Bandit Baseline - Generates Bandit results compared to ' + 'a baseline', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog='Additional Bandit arguments such as severity filtering (-ll) ' + 'can be added and will be passed to Bandit.' + ) + + parser.add_argument('targets', metavar='targets', type=str, nargs='+', + help='source file(s) or directory(s) to be tested') + + parser.add_argument('-f', dest='output_format', action='store', + default='terminal', help='specify output format', + choices=valid_baseline_formats) + + args, unknown = parser.parse_known_args() + + # #################### Setup Output ####################################### + # set the output format, or use a default if not provided + output_format = (args.output_format if args.output_format + else default_output_format) + + if output_format == default_output_format: + LOG.info("No output format specified, using %s", default_output_format) + + # set the report name based on the output format + report_fname = "{}.{}".format(report_basename, output_format) + + # #################### Check Requirements ################################# + try: + repo = git.Repo(os.getcwd()) + + except git.exc.InvalidGitRepositoryError: + LOG.error("Bandit baseline must be called from a git project root") + valid = False + + except git.exc.GitCommandNotFound: + LOG.error("Git command not found") + valid = False + + else: + if repo.is_dirty(): + LOG.error("Current working directory is dirty and must be " + "resolved") + valid = False + + # if output format is specified, we need to be able to write the report + if output_format != default_output_format and os.path.exists(report_fname): + LOG.error("File %s already exists, aborting", report_fname) + valid = False + + # Bandit needs to be able to create this temp file + if os.path.exists(baseline_tmp_file): + LOG.error("Temporary file %s needs to be removed prior to running", + baseline_tmp_file) + valid = False + + # we must validate -o is not provided, as it will mess up Bandit baseline + if '-o' in bandit_args: + LOG.error("Bandit baseline must not be called with the -o option") + valid = False + + return (output_format, repo, report_fname) if valid else (None, None, None) + + +if __name__ == '__main__': + main() diff --git a/env_27/lib/python2.7/site-packages/bandit/cli/config_generator.py b/env_27/lib/python2.7/site-packages/bandit/cli/config_generator.py new file mode 100644 index 0000000..5d532d0 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/cli/config_generator.py @@ -0,0 +1,186 @@ +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+from __future__ import print_function + +import argparse +import importlib +import logging +import os +import sys + +import yaml + +from bandit.core import extension_loader + +PROG_NAME = 'bandit_conf_generator' +LOG = logging.getLogger(__name__) + + +template = """ +### Bandit config file generated from: +# '{cli}' + +### This config may optionally select a subset of tests to run or skip by +### filling out the 'tests' and 'skips' lists given below. If no tests are +### specified for inclusion then it is assumed all tests are desired. The skips +### set will remove specific tests from the include set. This can be controlled +### using the -t/-s CLI options. Note that the same test ID should not appear +### in both 'tests' and 'skips', this would be nonsensical and is detected by +### Bandit at runtime. + +# Available tests: +{test_list} + +# (optional) list included test IDs here, eg '[B101, B406]': +{test} + +# (optional) list skipped test IDs here, eg '[B101, B406]': +{skip} + +### (optional) plugin settings - some test plugins require configuration data +### that may be given here, per-plugin. All bandit test plugins have a built in +### set of sensible defaults and these will be used if no configuration is +### provided. It is not necessary to provide settings for every (or any) plugin +### if the defaults are acceptable. + +{settings} +""" + + +def init_logger(): + LOG.handlers = [] + log_level = logging.INFO + log_format_string = "[%(levelname)5s]: %(message)s" + logging.captureWarnings(True) + LOG.setLevel(log_level) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter(log_format_string)) + LOG.addHandler(handler) + + +def parse_args(): + help_description = """Bandit Config Generator + + This tool is used to generate an optional profile. The profile may be used + to include or skip tests and override values for plugins. + + When used to store an output profile, this tool will output a template that + includes all plugins and their default settings. Any settings which aren't + being overridden can be safely removed from the profile and default values + will be used. Bandit will prefer settings from the profile over the built + in values.""" + + parser = argparse.ArgumentParser( + description=help_description, + formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument('--show-defaults', dest='show_defaults', + action='store_true', + help='show the default settings values for each ' + 'plugin but do not output a profile') + parser.add_argument('-o', '--out', dest='output_file', + action='store', + help='output file to save profile') + parser.add_argument( + '-t', '--tests', dest='tests', + action='store', default=None, type=str, + help='list of test names to run') + parser.add_argument( + '-s', '--skip', dest='skips', + action='store', default=None, type=str, + help='list of test names to skip') + args = parser.parse_args() + + if not args.output_file and not args.show_defaults: + parser.print_help() + parser.exit(1) + + return args + + +def get_config_settings(): + config = {} + for plugin in extension_loader.MANAGER.plugins: + fn_name = plugin.name + function = plugin.plugin + + # if a function takes config... 
+ if hasattr(function, '_takes_config'): + fn_module = importlib.import_module(function.__module__) + + # call the config generator if it exists + if hasattr(fn_module, 'gen_config'): + config[fn_name] = fn_module.gen_config(function._takes_config) + + return yaml.safe_dump(config, default_flow_style=False) + + +def main(): + init_logger() + args = parse_args() + + yaml_settings = get_config_settings() + + if args.show_defaults: + print(yaml_settings) + + if args.output_file: + if os.path.exists(os.path.abspath(args.output_file)): + LOG.error("File %s already exists, exiting", args.output_file) + sys.exit(2) + + try: + with open(args.output_file, 'w') as f: + skips = args.skips.split(',') if args.skips else [] + tests = args.tests.split(',') if args.tests else [] + + for skip in skips: + if not extension_loader.MANAGER.check_id(skip): + raise RuntimeError('unknown ID in skips: %s' % skip) + + for test in tests: + if not extension_loader.MANAGER.check_id(test): + raise RuntimeError('unknown ID in tests: %s' % test) + + tpl = "# {0} : {1}" + test_list = [tpl.format(t.plugin._test_id, t.name) + for t in extension_loader.MANAGER.plugins] + + others = [tpl.format(k, v['name']) for k, v in ( + extension_loader.MANAGER.blacklist_by_id.items())] + test_list.extend(others) + test_list.sort() + + contents = template.format( + cli=" ".join(sys.argv), + settings=yaml_settings, + test_list="\n".join(test_list), + skip='skips: ' + str(skips) if skips else 'skips:', + test='tests: ' + str(tests) if tests else 'tests:') + f.write(contents) + + except IOError: + LOG.error("Unable to open %s for writing", args.output_file) + + except Exception as e: + LOG.error("Error: %s", e) + + else: + LOG.info("Successfully wrote profile: %s", args.output_file) + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/env_27/lib/python2.7/site-packages/bandit/cli/main.py b/env_27/lib/python2.7/site-packages/bandit/cli/main.py new file mode 100644 index 0000000..ccec0ee --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/cli/main.py @@ -0,0 +1,404 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import argparse +import fnmatch +import logging +import os +import sys +import textwrap + + +import bandit +from bandit.core import config as b_config +from bandit.core import constants +from bandit.core import manager as b_manager +from bandit.core import utils + + +BASE_CONFIG = 'bandit.yaml' +LOG = logging.getLogger() + + +def _init_logger(debug=False, log_format=None): + '''Initialize the logger + + :param debug: Whether to enable debug mode + :return: An instantiated logging instance + ''' + LOG.handlers = [] + log_level = logging.INFO + if debug: + log_level = logging.DEBUG + + if not log_format: + # default log format + log_format_string = constants.log_format_string + else: + log_format_string = log_format + + logging.captureWarnings(True) + + LOG.setLevel(log_level) + handler = logging.StreamHandler(sys.stderr) + handler.setFormatter(logging.Formatter(log_format_string)) + LOG.addHandler(handler) + LOG.debug("logging initialized") + + +def _get_options_from_ini(ini_path, target): + """Return a dictionary of config options or None if we can't load any.""" + ini_file = None + + if ini_path: + ini_file = ini_path + else: + bandit_files = [] + + for t in target: + for root, dirnames, filenames in os.walk(t): + for filename in fnmatch.filter(filenames, '.bandit'): + bandit_files.append(os.path.join(root, filename)) + + if len(bandit_files) > 1: + LOG.error('Multiple .bandit files found - scan separately or ' + 'choose one with --ini\n\t%s', ', '.join(bandit_files)) + sys.exit(2) + + elif len(bandit_files) == 1: + ini_file = bandit_files[0] + LOG.info('Found project level .bandit file: %s', bandit_files[0]) + + if ini_file: + return utils.parse_ini_file(ini_file) + else: + return None + + +def _init_extensions(): + from bandit.core import extension_loader as ext_loader + return ext_loader.MANAGER + + +def _log_option_source(arg_val, ini_val, option_name): + """It's useful to show the source of each option.""" + if arg_val: + LOG.info("Using command line arg for %s", option_name) + return arg_val + elif ini_val: + LOG.info("Using ini file for %s", option_name) + return ini_val + else: + return None + + +def _running_under_virtualenv(): + if hasattr(sys, 'real_prefix'): + return True + elif sys.prefix != getattr(sys, 'base_prefix', sys.prefix): + return True + + +def _get_profile(config, profile_name, config_path): + profile = {} + if profile_name: + profiles = config.get_option('profiles') or {} + profile = profiles.get(profile_name) + if profile is None: + raise utils.ProfileNotFound(config_path, profile_name) + LOG.debug("read in legacy profile '%s': %s", profile_name, profile) + else: + profile['include'] = set(config.get_option('tests') or []) + profile['exclude'] = set(config.get_option('skips') or []) + return profile + + +def _log_info(args, profile): + inc = ",".join([t for t in profile['include']]) or "None" + exc = ",".join([t for t in profile['exclude']]) or "None" + LOG.info("profile include tests: %s", inc) + LOG.info("profile exclude tests: %s", exc) + LOG.info("cli include tests: %s", args.tests) + LOG.info("cli exclude tests: %s", args.skips) + + +def main(): + # bring our logging stuff up as early as possible + debug = ('-d' in sys.argv or '--debug' in sys.argv) + _init_logger(debug) + extension_mgr = _init_extensions() + + baseline_formatters = [f.name for f in filter(lambda x: + hasattr(x.plugin, + '_accepts_baseline'), + extension_mgr.formatters)] + + # now do normal startup + parser = argparse.ArgumentParser( + description='Bandit - a Python source code security 
analyzer', + formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument( + 'targets', metavar='targets', type=str, nargs='*', + help='source file(s) or directory(s) to be tested' + ) + parser.add_argument( + '-r', '--recursive', dest='recursive', + action='store_true', help='find and process files in subdirectories' + ) + parser.add_argument( + '-a', '--aggregate', dest='agg_type', + action='store', default='file', type=str, + choices=['file', 'vuln'], + help='aggregate output by vulnerability (default) or by filename' + ) + parser.add_argument( + '-n', '--number', dest='context_lines', + action='store', default=3, type=int, + help='maximum number of code lines to output for each issue' + ) + parser.add_argument( + '-c', '--configfile', dest='config_file', + action='store', default=None, type=str, + help='optional config file to use for selecting plugins and ' + 'overriding defaults' + ) + parser.add_argument( + '-p', '--profile', dest='profile', + action='store', default=None, type=str, + help='profile to use (defaults to executing all tests)' + ) + parser.add_argument( + '-t', '--tests', dest='tests', + action='store', default=None, type=str, + help='comma-separated list of test IDs to run' + ) + parser.add_argument( + '-s', '--skip', dest='skips', + action='store', default=None, type=str, + help='comma-separated list of test IDs to skip' + ) + parser.add_argument( + '-l', '--level', dest='severity', action='count', + default=1, help='report only issues of a given severity level or ' + 'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)' + ) + parser.add_argument( + '-i', '--confidence', dest='confidence', action='count', + default=1, help='report only issues of a given confidence level or ' + 'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)' + ) + output_format = 'screen' if sys.stdout.isatty() else 'txt' + parser.add_argument( + '-f', '--format', dest='output_format', action='store', + default=output_format, help='specify output format', + choices=sorted(extension_mgr.formatter_names) + ) + parser.add_argument( + '--msg-template', action='store', + default=None, help='specify output message template' + ' (only usable with --format custom),' + ' see CUSTOM FORMAT section' + ' for list of available values', + ) + parser.add_argument( + '-o', '--output', dest='output_file', action='store', nargs='?', + type=argparse.FileType('w'), default=sys.stdout, + help='write report to filename' + ) + parser.add_argument( + '-v', '--verbose', dest='verbose', action='store_true', + help='output extra information like excluded and included files' + ) + parser.add_argument( + '-d', '--debug', dest='debug', action='store_true', + help='turn on debug mode' + ) + parser.add_argument( + '--ignore-nosec', dest='ignore_nosec', action='store_true', + help='do not skip lines with # nosec comments' + ) + parser.add_argument( + '-x', '--exclude', dest='excluded_paths', action='store', + default='', help='comma-separated list of paths to exclude from scan ' + '(note that these are in addition to the excluded ' + 'paths provided in the config file)' + ) + parser.add_argument( + '-b', '--baseline', dest='baseline', action='store', + default=None, help='path of a baseline report to compare against ' + '(only JSON-formatted files are accepted)' + ) + parser.add_argument( + '--ini', dest='ini_path', action='store', default=None, + help='path to a .bandit file that supplies command line arguments' + ) + python_ver = sys.version.replace('\n', '') + parser.add_argument( + '--version', 
action='version', + version='%(prog)s {version}\n python version = {python}'.format( + version=bandit.__version__, python=python_ver) + ) + + parser.set_defaults(debug=False) + parser.set_defaults(verbose=False) + parser.set_defaults(ignore_nosec=False) + + plugin_info = ["%s\t%s" % (a[0], a[1].name) for a in + extension_mgr.plugins_by_id.items()] + blacklist_info = [] + for a in extension_mgr.blacklist.items(): + for b in a[1]: + blacklist_info.append('%s\t%s' % (b['id'], b['name'])) + + plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info))) + dedent_text = textwrap.dedent(''' + CUSTOM FORMATTING + ----------------- + + Available tags: + + {abspath}, {relpath}, {line}, {test_id}, + {severity}, {msg}, {confidence}, {range} + + Example usage: + + Default template: + bandit -r examples/ --format custom --msg-template \\ + "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}" + + Provides same output as: + bandit -r examples/ --format custom + + Tags can also be formatted in python string.format() style: + bandit -r examples/ --format custom --msg-template \\ + "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}" + + See python documentation for more information about formatting style: + https://docs.python.org/3.4/library/string.html + + The following tests were discovered and loaded: + ----------------------------------------------- + ''') + parser.epilog = dedent_text + "\t{0}".format(plugin_list) + + # setup work - parse arguments, and initialize BanditManager + args = parser.parse_args() + # Check if `--msg-template` is not present without custom formatter + if args.output_format != 'custom' and args.msg_template is not None: + parser.error("--msg-template can only be used with --format=custom") + + try: + b_conf = b_config.BanditConfig(config_file=args.config_file) + except utils.ConfigError as e: + LOG.error(e) + sys.exit(2) + + # Handle .bandit files in projects to pass cmdline args from file + ini_options = _get_options_from_ini(args.ini_path, args.targets) + if ini_options: + # prefer command line, then ini file + args.excluded_paths = _log_option_source(args.excluded_paths, + ini_options.get('exclude'), + 'excluded paths') + + args.skips = _log_option_source(args.skips, ini_options.get('skips'), + 'skipped tests') + + args.tests = _log_option_source(args.tests, ini_options.get('tests'), + 'selected tests') + ini_targets = ini_options.get('targets') + if ini_targets: + ini_targets = ini_targets.split(',') + args.targets = _log_option_source(args.targets, ini_targets, + 'selected targets') + # TODO(tmcpeak): any other useful options to pass from .bandit? 
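The _log_option_source calls above encode one precedence rule for every option that can arrive from two places: an explicit command-line value wins, the .bandit ini value is the fallback, and None means the caller keeps its own default. Condensed to its core, with the logging stripped out:

# CLI arg > ini value > None (caller's default survives)
def pick(arg_val, ini_val):
    if arg_val:
        return arg_val
    elif ini_val:
        return ini_val
    return None

assert pick('B101,B102', None) == 'B101,B102'  # CLI wins
assert pick(None, 'B101') == 'B101'            # ini is the fallback
assert pick(None, None) is None                # neither was given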
+ + if not args.targets: + LOG.error("No targets found in CLI or ini files, exiting.") + sys.exit(2) + # if the log format string was set in the options, reinitialize + if b_conf.get_option('log_format'): + log_format = b_conf.get_option('log_format') + _init_logger(debug, log_format=log_format) + + try: + profile = _get_profile(b_conf, args.profile, args.config_file) + _log_info(args, profile) + + profile['include'].update(args.tests.split(',') if args.tests else []) + profile['exclude'].update(args.skips.split(',') if args.skips else []) + extension_mgr.validate_profile(profile) + + except (utils.ProfileNotFound, ValueError) as e: + LOG.error(e) + sys.exit(2) + + b_mgr = b_manager.BanditManager(b_conf, args.agg_type, args.debug, + profile=profile, verbose=args.verbose, + ignore_nosec=args.ignore_nosec) + + if args.baseline is not None: + try: + with open(args.baseline) as bl: + data = bl.read() + b_mgr.populate_baseline(data) + except IOError: + LOG.warning("Could not open baseline report: %s", args.baseline) + sys.exit(2) + + if args.output_format not in baseline_formatters: + LOG.warning('Baseline must be used with one of the following ' + 'formats: ' + str(baseline_formatters)) + sys.exit(2) + + if args.output_format != "json": + if args.config_file: + LOG.info("using config: %s", args.config_file) + + LOG.info("running on Python %d.%d.%d", sys.version_info.major, + sys.version_info.minor, sys.version_info.micro) + + # initiate file discovery step within Bandit Manager + b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths) + + if not b_mgr.b_ts.tests: + LOG.error('No tests would be run, please check the profile.') + sys.exit(2) + + # initiate execution of tests within Bandit Manager + b_mgr.run_tests() + LOG.debug(b_mgr.b_ma) + LOG.debug(b_mgr.metrics) + + # trigger output of results by Bandit Manager + sev_level = constants.RANKING[args.severity - 1] + conf_level = constants.RANKING[args.confidence - 1] + b_mgr.output_results(args.context_lines, + sev_level, + conf_level, + args.output_file, + args.output_format, + args.msg_template) + + # return an exit code of 1 if there are results, 0 otherwise + if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0: + sys.exit(1) + else: + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/env_27/lib/python2.7/site-packages/bandit/core/__init__.py b/env_27/lib/python2.7/site-packages/bandit/core/__init__.py new file mode 100644 index 0000000..319eae5 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/__init__.py @@ -0,0 +1,27 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
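One detail of the main() above worth spelling out: -l/-i use argparse's 'count' action with a default of 1, so the repeated flag is simply an index into the RANKING list defined later in core/constants.py. The mapping works out as follows:

RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']  # from bandit.core.constants

# no flag -> severity == 1 -> RANKING[0] == 'UNDEFINED' (report everything)
# -l      -> severity == 2 -> RANKING[1] == 'LOW'
# -ll     -> severity == 3 -> RANKING[2] == 'MEDIUM'
# -lll    -> severity == 4 -> RANKING[3] == 'HIGH'
for count in range(1, 5):
    print(count, RANKING[count - 1])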
+ +from bandit.core import config # noqa +from bandit.core import context # noqa +from bandit.core import manager # noqa +from bandit.core import meta_ast # noqa +from bandit.core import node_visitor # noqa +from bandit.core import test_set # noqa +from bandit.core import tester # noqa +from bandit.core import utils # noqa +from bandit.core.constants import * # noqa +from bandit.core.issue import * # noqa +from bandit.core.test_properties import * # noqa diff --git a/env_27/lib/python2.7/site-packages/bandit/core/blacklisting.py b/env_27/lib/python2.7/site-packages/bandit/core/blacklisting.py new file mode 100644 index 0000000..114f179 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/blacklisting.py @@ -0,0 +1,76 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +import fnmatch + +from bandit.core import issue + + +def report_issue(check, name): + return issue.Issue( + severity=check.get('level', 'MEDIUM'), confidence='HIGH', + text=check['message'].replace('{name}', name), + ident=name, test_id=check.get("id", 'LEGACY')) + + +def blacklist(context, config): + """Generic blacklist test, B001. + + This generic blacklist test will be called for any encountered node with + defined blacklist data available. This data is loaded via plugins using + the 'bandit.blacklists' entry point. Please see the documentation for more + details. Each blacklist datum has a unique bandit ID that may be used for + filtering purposes, or alternatively all blacklisting can be filtered using + the id of this built in test, 'B001'. + """ + blacklists = config + node_type = context.node.__class__.__name__ + + if node_type == 'Call': + func = context.node.func + if isinstance(func, ast.Name) and func.id == '__import__': + if len(context.node.args): + if isinstance(context.node.args[0], ast.Str): + name = context.node.args[0].s + else: + # TODO(??): import through a variable, need symbol tab + name = "UNKNOWN" + else: + name = "" # handle '__import__()' + else: + name = context.call_function_name_qual + # In the case the Call is an importlib.import, treat the first + # argument name as an actual import module name. + # Will produce None if argument is not a literal or identifier + if name in ["importlib.import_module", "importlib.__import__"]: + name = context.call_args[0] + for check in blacklists[node_type]: + for qn in check['qualnames']: + if name is not None and fnmatch.fnmatch(name, qn): + return report_issue(check, name) + + if node_type.startswith('Import'): + prefix = "" + if node_type == "ImportFrom": + if context.node.module is not None: + prefix = context.node.module + "." 
+ + for check in blacklists[node_type]: + for name in context.node.names: + for qn in check['qualnames']: + if (prefix + name.name).startswith(qn): + return report_issue(check, name.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/core/config.py b/env_27/lib/python2.7/site-packages/bandit/core/config.py new file mode 100644 index 0000000..7369f06 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/config.py @@ -0,0 +1,243 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +import yaml + +from bandit.core import constants +from bandit.core import extension_loader +from bandit.core import utils + + +LOG = logging.getLogger(__name__) + + +class BanditConfig(object): + def __init__(self, config_file=None): + '''Attempt to initialize a config dictionary from a yaml file. + + Error out if loading the yaml file fails for any reason. + :param config_file: The Bandit yaml config file + + :raises bandit.utils.ConfigError: If the config is invalid or + unreadable. + ''' + self.config_file = config_file + self._config = {} + + if config_file: + try: + f = open(config_file, 'r') + except IOError: + raise utils.ConfigError("Could not read config file.", + config_file) + + try: + self._config = yaml.safe_load(f) + self.validate(config_file) + except yaml.YAMLError as err: + LOG.error(err) + raise utils.ConfigError("Error parsing file.", config_file) + + # valid config must be a dict + if not isinstance(self._config, dict): + raise utils.ConfigError("Error parsing file.", config_file) + + self.convert_legacy_config() + + else: + # use sane defaults + self._config['plugin_name_pattern'] = '*.py' + self._config['include'] = ['*.py', '*.pyw'] + + self._init_settings() + + def get_option(self, option_string): + '''Returns the option from the config specified by the option_string. + + '.' can be used to denote levels, for example to retrieve the options + from the 'a' profile you can use 'profiles.a' + :param option_string: The string specifying the option to retrieve + :return: The object specified by the option_string, or None if it can't + be found. + ''' + option_levels = option_string.split('.') + cur_item = self._config + for level in option_levels: + if cur_item and (level in cur_item): + cur_item = cur_item[level] + else: + return None + + return cur_item + + def get_setting(self, setting_name): + if setting_name in self._settings: + return self._settings[setting_name] + else: + return None + + @property + def config(self): + '''Property to return the config dictionary + + :return: Config dictionary + ''' + return self._config + + def _init_settings(self): + '''This function calls a set of other functions (one per setting) + + This function calls a set of other functions (one per setting) to build + out the _settings dictionary. Each other function will set values from + the config (if set), otherwise use defaults (from constants if + possible). 
+ :return: - + ''' + self._settings = {} + self._init_plugin_name_pattern() + + def _init_plugin_name_pattern(self): + '''Sets settings['plugin_name_pattern'] from default or config file.''' + plugin_name_pattern = constants.plugin_name_pattern + if self.get_option('plugin_name_pattern'): + plugin_name_pattern = self.get_option('plugin_name_pattern') + self._settings['plugin_name_pattern'] = plugin_name_pattern + + def convert_legacy_config(self): + updated_profiles = self.convert_names_to_ids() + bad_calls, bad_imports = self.convert_legacy_blacklist_data() + + if updated_profiles: + self.convert_legacy_blacklist_tests(updated_profiles, + bad_calls, bad_imports) + self._config['profiles'] = updated_profiles + + def convert_names_to_ids(self): + '''Convert test names to IDs, unknown names are left unchanged.''' + extman = extension_loader.MANAGER + + updated_profiles = {} + for name, profile in (self.get_option('profiles') or {}).items(): + # NOTE(tkelsey): can't use default of get() because value is + # sometimes explicity 'None', for example when the list if given in + # yaml but not populated with any values. + include = set((extman.get_plugin_id(i) or i) + for i in (profile.get('include') or [])) + exclude = set((extman.get_plugin_id(i) or i) + for i in (profile.get('exclude') or [])) + updated_profiles[name] = {'include': include, 'exclude': exclude} + return updated_profiles + + def convert_legacy_blacklist_data(self): + '''Detect legacy blacklist data and convert it to new format.''' + bad_calls_list = [] + bad_imports_list = [] + + bad_calls = self.get_option('blacklist_calls') or {} + bad_calls = bad_calls.get('bad_name_sets', {}) + for item in bad_calls: + for key, val in item.items(): + val['name'] = key + val['message'] = val['message'].replace('{func}', '{name}') + bad_calls_list.append(val) + + bad_imports = self.get_option('blacklist_imports') or {} + bad_imports = bad_imports.get('bad_import_sets', {}) + for item in bad_imports: + for key, val in item.items(): + val['name'] = key + val['message'] = val['message'].replace('{module}', '{name}') + val['qualnames'] = val['imports'] + del val['imports'] + bad_imports_list.append(val) + + if bad_imports_list or bad_calls_list: + LOG.warning('Legacy blacklist data found in config, overriding ' + 'data plugins') + return bad_calls_list, bad_imports_list + + @staticmethod + def convert_legacy_blacklist_tests(profiles, bad_imports, bad_calls): + '''Detect old blacklist tests, convert to use new builtin.''' + def _clean_set(name, data): + if name in data: + data.remove(name) + data.add('B001') + + for name, profile in profiles.items(): + blacklist = {} + include = profile['include'] + exclude = profile['exclude'] + + name = 'blacklist_calls' + if name in include and name not in exclude: + blacklist.setdefault('Call', []).extend(bad_calls) + + _clean_set(name, include) + _clean_set(name, exclude) + + name = 'blacklist_imports' + if name in include and name not in exclude: + blacklist.setdefault('Import', []).extend(bad_imports) + blacklist.setdefault('ImportFrom', []).extend(bad_imports) + blacklist.setdefault('Call', []).extend(bad_imports) + + _clean_set(name, include) + _clean_set(name, exclude) + _clean_set('blacklist_import_func', include) + _clean_set('blacklist_import_func', exclude) + + # This can happen with a legacy config that includes + # blacklist_calls but exclude blacklist_imports for example + if 'B001' in include and 'B001' in exclude: + exclude.remove('B001') + + profile['blacklist'] = blacklist + + def validate(self, 
path): + '''Validate the config data.''' + legacy = False + message = ("Config file has an include or exclude reference " + "to legacy test '{0}' but no configuration data for " + "it. Configuration data is required for this test. " + "Please consider switching to the new config file " + "format, the tool 'bandit-config-generator' can help " + "you with this.") + + def _test(key, block, exclude, include): + if key in exclude or key in include: + if self._config.get(block) is None: + raise utils.ConfigError(message.format(key), path) + + if 'profiles' in self._config: + legacy = True + for profile in self._config['profiles'].values(): + inc = profile.get('include') or set() + exc = profile.get('exclude') or set() + + _test('blacklist_imports', 'blacklist_imports', inc, exc) + _test('blacklist_import_func', 'blacklist_imports', inc, exc) + _test('blacklist_calls', 'blacklist_calls', inc, exc) + + # show deprecation message + if legacy: + LOG.warning("Config file '%s' contains deprecated legacy config " + "data. Please consider upgrading to the new config " + "format. The tool 'bandit-config-generator' can help " + "you with this. Support for legacy configs will be " + "removed in a future bandit version.", path) diff --git a/env_27/lib/python2.7/site-packages/bandit/core/constants.py b/env_27/lib/python2.7/site-packages/bandit/core/constants.py new file mode 100644 index 0000000..dbacc85 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/constants.py @@ -0,0 +1,42 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# default plugin name pattern +plugin_name_pattern = '*.py' + +# default progress increment +progress_increment = 50 + +RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH'] +RANKING_VALUES = {'UNDEFINED': 1, 'LOW': 3, 'MEDIUM': 5, 'HIGH': 10} +CRITERIA = [('SEVERITY', 'UNDEFINED'), ('CONFIDENCE', 'UNDEFINED')] + +# add each ranking to globals, to allow direct access in module name space +for rank in RANKING: + globals()[rank] = rank + +CONFIDENCE_DEFAULT = 'UNDEFINED' + +# A list of values Python considers to be False. +# These can be useful in tests to check if a value is True or False. +# We don't handle the case of user-defined classes being false. +# These are only useful when we have a constant in code. If we +# have a variable we cannot determine if False. +# See https://docs.python.org/2/library/stdtypes.html#truth-value-testing +FALSE_VALUES = [None, False, 'False', 0, 0.0, 0j, '', (), [], {}] + +# override with "log_format" option in config file +log_format_string = '[%(module)s]\t%(levelname)s\t%(message)s' diff --git a/env_27/lib/python2.7/site-packages/bandit/core/context.py b/env_27/lib/python2.7/site-packages/bandit/core/context.py new file mode 100644 index 0000000..cf5fe42 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/context.py @@ -0,0 +1,339 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import _ast + +import six + +from bandit.core import utils + + +class Context(object): + def __init__(self, context_object=None): + '''Initialize the class with a context, empty dict otherwise + + :param context_object: The context object to create class from + :return: - + ''' + if context_object is not None: + self._context = context_object + else: + self._context = dict() + + def __repr__(self): + '''Generate representation of object for printing / interactive use + + Most likely only interested in non-default properties, so we return + the string version of _context. + + Example string returned: + <Context {'node': <_ast.Call object at 0x110252510>, 'function': None, + 'name': 'socket', 'imports': set(['socket']), 'module': None, + 'filename': 'examples/binding.py', + 'call': <_ast.Call object at 0x110252510>, 'lineno': 3, + 'import_aliases': {}, 'qualname': 'socket.socket'}> + + :return: A string representation of the object + ''' + return "<Context %s>" % self._context + + @property + def call_args(self): + '''Get a list of function args + + :return: A list of function args + ''' + args = [] + for arg in self._context['call'].args: + if hasattr(arg, 'attr'): + args.append(arg.attr) + else: + args.append(self._get_literal_value(arg)) + return args + + @property + def call_args_count(self): + '''Get the number of args a function call has + + :return: The number of args a function call has + ''' + if 'call' in self._context and hasattr(self._context['call'], 'args'): + return len(self._context['call'].args) + else: + return None + + @property + def call_function_name(self): + '''Get the name (not FQ) of a function call + + :return: The name (not FQ) of a function call + ''' + if 'name' in self._context: + return self._context['name'] + else: + return None + + @property + def call_function_name_qual(self): + '''Get the FQ name of a function call + + :return: The FQ name of a function call + ''' + if 'qualname' in self._context: + return self._context['qualname'] + else: + return None + + @property + def call_keywords(self): + '''Get a dictionary of keyword parameters + + :return: A dictionary of keyword parameters for a call as strings + ''' + if ('call' in self._context and + hasattr(self._context['call'], 'keywords')): + return_dict = {} + for li in self._context['call'].keywords: + if hasattr(li.value, 'attr'): + return_dict[li.arg] = li.value.attr + else: + return_dict[li.arg] = self._get_literal_value(li.value) + return return_dict + else: + return None + + @property + def node(self): + '''Get the raw AST node associated with the context + + :return: The raw AST node associated with the context + ''' + if 'node' in self._context: + return self._context['node'] + else: + return None + + @property + def string_val(self): + '''Get the value of a standalone unicode or string object + + :return: value of a standalone unicode or string object + ''' + if 'str' in self._context: + return self._context['str'] + else: + return None + + @property + def bytes_val(self): + '''Get the value of a standalone bytes
object (py3 only) + + :return: value of a standalone bytes object + ''' + return self._context.get('bytes') + + @property + def string_val_as_escaped_bytes(self): + '''Get escaped value of the object. + + Turn the value of a string or bytes object into byte sequence with + unknown, control, and \\ characters escaped. + + This function should be used when looking for a known sequence in a + potentially badly encoded string in the code. + + :return: sequence of printable ascii bytes representing original string + ''' + val = self.string_val + if val is not None: + # it's any of str or unicode in py2, or str in py3 + return val.encode('unicode_escape') + + val = self.bytes_val + if val is not None: + return utils.escaped_bytes_representation(val) + + return None + + @property + def statement(self): + '''Get the raw AST for the current statement + + :return: The raw AST for the current statement + ''' + if 'statement' in self._context: + return self._context['statement'] + else: + return None + + @property + def function_def_defaults_qual(self): + '''Get a list of fully qualified default values in a function def + + :return: List of defaults + ''' + defaults = [] + if 'node' in self._context: + for default in self._context['node'].args.defaults: + defaults.append(utils.get_qual_attr( + default, + self._context['import_aliases'])) + return defaults + + def _get_literal_value(self, literal): + '''Utility function to turn AST literals into native Python types + + :param literal: The AST literal to convert + :return: The value of the AST literal + ''' + if isinstance(literal, _ast.Num): + literal_value = literal.n + + elif isinstance(literal, _ast.Str): + literal_value = literal.s + + elif isinstance(literal, _ast.List): + return_list = list() + for li in literal.elts: + return_list.append(self._get_literal_value(li)) + literal_value = return_list + + elif isinstance(literal, _ast.Tuple): + return_tuple = tuple() + for ti in literal.elts: + return_tuple = return_tuple + (self._get_literal_value(ti),) + literal_value = return_tuple + + elif isinstance(literal, _ast.Set): + return_set = set() + for si in literal.elts: + return_set.add(self._get_literal_value(si)) + literal_value = return_set + + elif isinstance(literal, _ast.Dict): + literal_value = dict(zip(literal.keys, literal.values)) + + elif isinstance(literal, _ast.Ellipsis): + # what do we want to do with this? + literal_value = None + + elif isinstance(literal, _ast.Name): + literal_value = literal.id + + # NOTE(sigmavirus24): NameConstants are only part of the AST in Python + # 3. NameConstants tend to refer to things like True and False. This + # prevents them from being re-assigned in Python 3. + elif six.PY3 and isinstance(literal, _ast.NameConstant): + literal_value = str(literal.value) + + # NOTE(sigmavirus24): Bytes are only part of the AST in Python 3 + elif six.PY3 and isinstance(literal, _ast.Bytes): + literal_value = literal.s + + else: + literal_value = None + + return literal_value + + def get_call_arg_value(self, argument_name): + '''Gets the value of a named argument in a function call. + + :return: named argument value + ''' + kwd_values = self.call_keywords + if kwd_values is not None and argument_name in kwd_values: + return kwd_values[argument_name] + + def check_call_arg_value(self, argument_name, argument_values=None): + '''Checks for a value of a named argument in a function call. + + Returns none if the specified argument is not found. 
+ :param argument_name: A string - name of the argument to look for + :param argument_values: the value, or list of values to test against + :return: Boolean True if argument found and matched, False if + found and not matched, None if argument not found at all + ''' + arg_value = self.get_call_arg_value(argument_name) + if arg_value is not None: + if not isinstance(argument_values, list): + # if passed a single value, or a tuple, convert to a list + argument_values = list((argument_values,)) + for val in argument_values: + if arg_value == val: + return True + return False + else: + # argument name not found, return None to allow testing for this + # eventuality + return None + + def get_lineno_for_call_arg(self, argument_name): + '''Get the line number for a specific named argument + + In case the call is split over multiple lines, get the correct one for + the argument. + :param argument_name: A string - name of the argument to look for + :return: Integer - the line number of the found argument, or -1 + ''' + for key in self.node.keywords: + if key.arg == argument_name: + return key.value.lineno + + def get_call_arg_at_position(self, position_num): + '''Returns positional argument at the specified position (if it exists) + + :param position_num: The index of the argument to return the value for + :return: Value of the argument at the specified position if it exists + ''' + if ('call' in self._context and + hasattr(self._context['call'], 'args') and + position_num < len(self._context['call'].args)): + return self._get_literal_value( + self._context['call'].args[position_num] + ) + else: + return None + + def is_module_being_imported(self, module): + '''Check for the specified module is currently being imported + + :param module: The module name to look for + :return: True if the module is found, False otherwise + ''' + return 'module' in self._context and self._context['module'] == module + + def is_module_imported_exact(self, module): + '''Check if a specified module has been imported; only exact matches. + + :param module: The module name to look for + :return: True if the module is found, False otherwise + ''' + return ('imports' in self._context and + module in self._context['imports']) + + def is_module_imported_like(self, module): + '''Check if a specified module has been imported + + Check if a specified module has been imported; specified module exists + as part of any import statement. + :param module: The module name to look for + :return: True if the module is found, False otherwise + ''' + if 'imports' in self._context: + for imp in self._context['imports']: + if module in imp: + return True + return False diff --git a/env_27/lib/python2.7/site-packages/bandit/core/docs_utils.py b/env_27/lib/python2.7/site-packages/bandit/core/docs_utils.py new file mode 100644 index 0000000..c26ae90 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/docs_utils.py @@ -0,0 +1,53 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# where our docs are hosted +BASE_URL = 'https://bandit.readthedocs.io/en/latest/' + + +def get_url(bid): + # NOTE(tkelsey): for some reason this import can't be found when stevedore + # loads up the formatter plugin that imports this file. It is available + # later though. + from bandit.core import extension_loader + + info = extension_loader.MANAGER.plugins_by_id.get(bid) + if info is not None: + return '%splugins/%s_%s.html' % (BASE_URL, bid.lower(), + info.plugin.__name__) + + info = extension_loader.MANAGER.blacklist_by_id.get(bid) + if info is not None: + template = 'blacklists/blacklist_{kind}.html#{id}-{name}' + info['name'] = info['name'].replace('_', '-') + + if info['id'].startswith('B3'): # B3XX + # Some of the links are combined, so we have exception cases + if info['id'] in ['B304', 'B305']: + info['id'] = 'b304-b305' + info['name'] = 'ciphers-and-modes' + elif info['id'] in ['B313', 'B314', 'B315', 'B316', 'B317', + 'B318', 'B319', 'B320']: + info['id'] = 'b313-b320' + ext = template.format( + kind='calls', id=info['id'], name=info['name']) + else: + ext = template.format( + kind='imports', id=info['id'], name=info['name']) + + return BASE_URL + ext.lower() + + return BASE_URL # no idea, give the docs main page diff --git a/env_27/lib/python2.7/site-packages/bandit/core/extension_loader.py b/env_27/lib/python2.7/site-packages/bandit/core/extension_loader.py new file mode 100644 index 0000000..4b0dcba --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/extension_loader.py @@ -0,0 +1,118 @@ +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import sys + +import six +from stevedore import extension + +from bandit.core import utils + + +class Manager(object): + # These IDs are for bandit built in tests + builtin = [ + 'B001' # Built in blacklist test + ] + + def __init__(self, formatters_namespace='bandit.formatters', + plugins_namespace='bandit.plugins', + blacklists_namespace='bandit.blacklists'): + # Cache the extension managers, loaded extensions, and extension names + self.load_formatters(formatters_namespace) + self.load_plugins(plugins_namespace) + self.load_blacklists(blacklists_namespace) + + def load_formatters(self, formatters_namespace): + self.formatters_mgr = extension.ExtensionManager( + namespace=formatters_namespace, + invoke_on_load=False, + verify_requirements=False, + ) + self.formatters = list(self.formatters_mgr) + self.formatter_names = self.formatters_mgr.names() + + def load_plugins(self, plugins_namespace): + self.plugins_mgr = extension.ExtensionManager( + namespace=plugins_namespace, + invoke_on_load=False, + verify_requirements=False, + ) + + def test_has_id(plugin): + if not hasattr(plugin.plugin, "_test_id"): + # logger not setup yet, so using print + print("WARNING: Test '%s' has no ID, skipping." 
% plugin.name, + file=sys.stderr) + return False + return True + + self.plugins = list(filter(test_has_id, list(self.plugins_mgr))) + self.plugin_names = [plugin.name for plugin in self.plugins] + self.plugins_by_id = {p.plugin._test_id: p for p in self.plugins} + self.plugins_by_name = {p.name: p for p in self.plugins} + + def get_plugin_id(self, plugin_name): + if plugin_name in self.plugins_by_name: + return self.plugins_by_name[plugin_name].plugin._test_id + return None + + def load_blacklists(self, blacklist_namespace): + self.blacklists_mgr = extension.ExtensionManager( + namespace=blacklist_namespace, + invoke_on_load=False, + verify_requirements=False, + ) + self.blacklist = {} + blacklist = list(self.blacklists_mgr) + for item in blacklist: + for key, val in item.plugin().items(): + utils.check_ast_node(key) + self.blacklist.setdefault(key, []).extend(val) + + self.blacklist_by_id = {} + self.blacklist_by_name = {} + for val in six.itervalues(self.blacklist): + for b in val: + self.blacklist_by_id[b['id']] = b + self.blacklist_by_name[b['name']] = b + + def validate_profile(self, profile): + '''Validate that everything in the configured profiles looks good.''' + for inc in profile['include']: + if not self.check_id(inc): + raise ValueError('Unknown test found in profile: %s' % inc) + + for exc in profile['exclude']: + if not self.check_id(exc): + raise ValueError('Unknown test found in profile: %s' % exc) + + union = set(profile['include']) & set(profile['exclude']) + if len(union) > 0: + raise ValueError('Non-exclusive include/exclude test sets: %s' % + union) + + def check_id(self, test): + return ( + test in self.plugins_by_id or + test in self.blacklist_by_id or + test in self.builtin) + +# Using entry-points and pkg_resources *can* be expensive. So let's load these +# once, store them on the object, and have a module global object for +# accessing them. After the first time this module is imported, it should save +# this attribute on the module and not have to reload the entry-points. +MANAGER = Manager() diff --git a/env_27/lib/python2.7/site-packages/bandit/core/issue.py b/env_27/lib/python2.7/site-packages/bandit/core/issue.py new file mode 100644 index 0000000..32149c6 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/issue.py @@ -0,0 +1,139 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
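Issue.filter, defined just below, admits a finding only when both its severity and confidence indices in RANKING reach the requested thresholds, so a MEDIUM severity filter passes MEDIUM and HIGH findings and drops LOW ones. A quick sanity check of that behavior, assuming bandit is importable:

from bandit.core import issue

i = issue.Issue(severity='MEDIUM', confidence='HIGH', text='demo finding')
assert i.filter('LOW', 'LOW')        # MEDIUM >= LOW and HIGH >= LOW
assert i.filter('MEDIUM', 'HIGH')    # both thresholds met exactly
assert not i.filter('HIGH', 'LOW')   # severity MEDIUM < HIGH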
+ +from __future__ import division +from __future__ import unicode_literals + +import linecache + +from six import moves + +from bandit.core import constants + + +class Issue(object): + def __init__(self, severity, confidence=constants.CONFIDENCE_DEFAULT, + text="", ident=None, lineno=None, test_id=""): + self.severity = severity + self.confidence = confidence + if isinstance(text, bytes): + text = text.decode('utf-8') + self.text = text + self.ident = ident + self.fname = "" + self.test = "" + self.test_id = test_id + self.lineno = lineno + self.linerange = [] + + def __str__(self): + return ("Issue: '%s' from %s:%s: Severity: %s Confidence: " + "%s at %s:%i") % (self.text, self.test_id, + (self.ident or self.test), self.severity, + self.confidence, self.fname, self.lineno) + + def __eq__(self, other): + # if the issue text, severity, confidence, and filename match, it's + # the same issue from our perspective + match_types = ['text', 'severity', 'confidence', 'fname', 'test', + 'test_id'] + return all(getattr(self, field) == getattr(other, field) + for field in match_types) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return id(self) + + def filter(self, severity, confidence): + '''Utility to filter on confidence and severity + + This function determines whether an issue should be included by + comparing the severity and confidence rating of the issue to minimum + thresholds specified in 'severity' and 'confidence' respectively. + + Formatters should call manager.filter_results() directly. + + This will return false if either the confidence or severity of the + issue are lower than the given threshold values. + + :param severity: Severity threshold + :param confidence: Confidence threshold + :return: True/False depending on whether issue meets threshold + + ''' + rank = constants.RANKING + return (rank.index(self.severity) >= rank.index(severity) and + rank.index(self.confidence) >= rank.index(confidence)) + + def get_code(self, max_lines=3, tabbed=False): + '''Gets lines of code from a file the generated this issue. 
+ + :param max_lines: Max lines of context to return + :param tabbed: Use tabbing in the output + :return: strings of code + ''' + lines = [] + max_lines = max(max_lines, 1) + lmin = max(1, self.lineno - max_lines // 2) + lmax = lmin + len(self.linerange) + max_lines - 1 + + tmplt = "%i\t%s" if tabbed else "%i %s" + for line in moves.xrange(lmin, lmax): + text = linecache.getline(self.fname, line) + + if isinstance(text, bytes): + text = text.decode('utf-8') + + if not len(text): + break + lines.append(tmplt % (line, text)) + return ''.join(lines) + + def as_dict(self, with_code=True): + '''Convert the issue to a dict of values for outputting.''' + out = { + 'filename': self.fname, + 'test_name': self.test, + 'test_id': self.test_id, + 'issue_severity': self.severity, + 'issue_confidence': self.confidence, + 'issue_text': self.text.encode('utf-8').decode('utf-8'), + 'line_number': self.lineno, + 'line_range': self.linerange, + } + + if with_code: + out['code'] = self.get_code() + return out + + def from_dict(self, data, with_code=True): + self.code = data["code"] + self.fname = data["filename"] + self.severity = data["issue_severity"] + self.confidence = data["issue_confidence"] + self.text = data["issue_text"] + self.test = data["test_name"] + self.test_id = data["test_id"] + self.lineno = data["line_number"] + self.linerange = data["line_range"] + + +def issue_from_dict(data): + i = Issue(severity=data["issue_severity"]) + i.from_dict(data) + return i diff --git a/env_27/lib/python2.7/site-packages/bandit/core/manager.py b/env_27/lib/python2.7/site-packages/bandit/core/manager.py new file mode 100644 index 0000000..cb8b574 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/manager.py @@ -0,0 +1,398 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
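The manager module that follows is the piece the CLI drives, and it can be used as a library in the same sequence main() follows: build a config, construct the manager, discover files, run tests, then pull filtered results. A minimal sketch — the target path 'examples/' is illustrative:

from bandit.core import config as b_config
from bandit.core import manager as b_manager

conf = b_config.BanditConfig()                 # sane defaults: *.py, *.pyw
mgr = b_manager.BanditManager(conf, 'file')    # aggregate results by file
mgr.discover_files(['examples/'], recursive=True)
mgr.run_tests()

for found in mgr.get_issue_list(sev_level='LOW', conf_level='LOW'):
    print(found)   # Issue.__str__ includes test id, severity, file, line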
+ +import collections +import fnmatch +import json +import logging +import os +import sys +import traceback + +from bandit.core import constants as b_constants +from bandit.core import extension_loader +from bandit.core import issue +from bandit.core import meta_ast as b_meta_ast +from bandit.core import metrics +from bandit.core import node_visitor as b_node_visitor +from bandit.core import test_set as b_test_set + + +LOG = logging.getLogger(__name__) + + +class BanditManager(object): + + scope = [] + + def __init__(self, config, agg_type, debug=False, verbose=False, + profile=None, ignore_nosec=False): + '''Get logger, config, AST handler, and result store ready + + :param config: config options object + :type config: bandit.core.BanditConfig + :param agg_type: aggregation type + :param debug: Whether to show debug messages or not + :param verbose: Whether to show verbose output + :param profile_name: Optional name of profile to use (from cmd line) + :param ignore_nosec: Whether to ignore #nosec or not + :return: + ''' + self.debug = debug + self.verbose = verbose + if not profile: + profile = {} + self.ignore_nosec = ignore_nosec + self.b_conf = config + self.files_list = [] + self.excluded_files = [] + self.b_ma = b_meta_ast.BanditMetaAst() + self.skipped = [] + self.results = [] + self.baseline = [] + self.agg_type = agg_type + self.metrics = metrics.Metrics() + self.b_ts = b_test_set.BanditTestSet(config, profile) + + # set the increment of after how many files to show progress + self.progress = b_constants.progress_increment + self.scores = [] + + def get_skipped(self): + ret = [] + # "skip" is a tuple of name and reason, decode just the name + for skip in self.skipped: + if isinstance(skip[0], bytes): + ret.append((skip[0].decode('utf-8'), skip[1])) + else: + ret.append(skip) + return ret + + def get_issue_list(self, + sev_level=b_constants.LOW, + conf_level=b_constants.LOW): + return self.filter_results(sev_level, conf_level) + + def populate_baseline(self, data): + '''Populate a baseline set of issues from a JSON report + + This will populate a list of baseline issues discovered from a previous + run of bandit. Later this baseline can be used to filter out the result + set, see filter_results. + ''' + items = [] + try: + jdata = json.loads(data) + items = [issue.issue_from_dict(j) for j in jdata["results"]] + except Exception as e: + LOG.warning("Failed to load baseline data: %s", e) + self.baseline = items + + def filter_results(self, sev_filter, conf_filter): + '''Returns a list of results filtered by the baseline + + This works by checking the number of results returned from each file we + process. If the number of results is different to the number reported + for the same file in the baseline, then we return all results for the + file. We can't reliably return just the new results, as line numbers + will likely have changed. 
+ + :param sev_filter: severity level filter to apply + :param conf_filter: confidence level filter to apply + ''' + + results = [i for i in self.results if + i.filter(sev_filter, conf_filter)] + + if not self.baseline: + return results + + unmatched = _compare_baseline_results(self.baseline, results) + # if it's a baseline we'll return a dictionary of issues and a list of + # candidate issues + return _find_candidate_matches(unmatched, results) + + def results_count(self, sev_filter=b_constants.LOW, + conf_filter=b_constants.LOW): + '''Return the count of results + + :param sev_filter: Severity level to filter lower + :param conf_filter: Confidence level to filter + :return: Number of results in the set + ''' + return len(self.get_issue_list(sev_filter, conf_filter)) + + def output_results(self, lines, sev_level, conf_level, output_file, + output_format, template=None): + '''Outputs results from the result store + + :param lines: How many surrounding lines to show per result + :param sev_level: Which severity levels to show (LOW, MEDIUM, HIGH) + :param conf_level: Which confidence levels to show (LOW, MEDIUM, HIGH) + :param output_file: File to store results + :param output_format: output format plugin name + :param template: Output template with non-terminal tags + (default: {abspath}:{line}: + {test_id}[bandit]: {severity}: {msg}) + :return: - + ''' + try: + formatters_mgr = extension_loader.MANAGER.formatters_mgr + if output_format not in formatters_mgr: + output_format = 'screen' if sys.stdout.isatty() else 'txt' + + formatter = formatters_mgr[output_format] + report_func = formatter.plugin + if output_format == 'custom': + report_func(self, fileobj=output_file, sev_level=sev_level, + conf_level=conf_level, lines=lines, + template=template) + else: + report_func(self, fileobj=output_file, sev_level=sev_level, + conf_level=conf_level, lines=lines) + + except Exception as e: + raise RuntimeError("Unable to output report using '%s' formatter: " + "%s" % (output_format, str(e))) + + def discover_files(self, targets, recursive=False, excluded_paths=''): + '''Add tests directly and from a directory to the test set + + :param targets: The command line list of files and directories + :param recursive: True/False - whether to add all files from dirs + :return: + ''' + # We'll mantain a list of files which are added, and ones which have + # been explicitly excluded + files_list = set() + excluded_files = set() + + excluded_path_strings = self.b_conf.get_option('exclude_dirs') or [] + included_globs = self.b_conf.get_option('include') or ['*.py'] + + # if there are command line provided exclusions add them to the list + if excluded_paths: + for path in excluded_paths.split(','): + excluded_path_strings.append(path) + + # build list of files we will analyze + for fname in targets: + # if this is a directory and recursive is set, find all files + if os.path.isdir(fname): + if recursive: + new_files, newly_excluded = _get_files_from_dir( + fname, + included_globs=included_globs, + excluded_path_strings=excluded_path_strings + ) + files_list.update(new_files) + excluded_files.update(newly_excluded) + else: + LOG.warning("Skipping directory (%s), use -r flag to " + "scan contents", fname) + + else: + # if the user explicitly mentions a file on command line, + # we'll scan it, regardless of whether it's in the included + # file types list + if _is_file_included(fname, included_globs, + excluded_path_strings, + enforce_glob=False): + files_list.add(fname) + else: + excluded_files.add(fname) + + 
self.files_list = sorted(files_list) + self.excluded_files = sorted(excluded_files) + + def run_tests(self): + '''Runs through all files in the scope + + :return: - + ''' + # display progress, if number of files warrants it + if len(self.files_list) > self.progress: + sys.stderr.write("%s [" % len(self.files_list)) + + # if we have problems with a file, we'll remove it from the files_list + # and add it to the skipped list instead + new_files_list = list(self.files_list) + + for count, fname in enumerate(self.files_list): + LOG.debug("working on file : %s", fname) + + if len(self.files_list) > self.progress: + # is it time to update the progress indicator? + if count % self.progress == 0: + sys.stderr.write("%s.. " % count) + sys.stderr.flush() + try: + if fname == '-': + sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0) + self._parse_file('', sys.stdin, new_files_list) + else: + with open(fname, 'rb') as fdata: + self._parse_file(fname, fdata, new_files_list) + except IOError as e: + self.skipped.append((fname, e.strerror)) + new_files_list.remove(fname) + + if len(self.files_list) > self.progress: + sys.stderr.write("]\n") + sys.stderr.flush() + + # reflect any files which may have been skipped + self.files_list = new_files_list + + # do final aggregation of metrics + self.metrics.aggregate() + + def _parse_file(self, fname, fdata, new_files_list): + try: + # parse the current file + data = fdata.read() + lines = data.splitlines() + self.metrics.begin(fname) + self.metrics.count_locs(lines) + if self.ignore_nosec: + nosec_lines = set() + else: + nosec_lines = set( + lineno + 1 for + (lineno, line) in enumerate(lines) + if b'#nosec' in line or b'# nosec' in line) + score = self._execute_ast_visitor(fname, data, nosec_lines) + self.scores.append(score) + self.metrics.count_issues([score, ]) + except KeyboardInterrupt as e: + sys.exit(2) + except SyntaxError as e: + self.skipped.append((fname, + "syntax error while parsing AST from file")) + new_files_list.remove(fname) + except Exception as e: + LOG.error("Exception occurred when executing tests against " + "%s. 
Run \"bandit --debug %s\" to see the full " + "traceback.", fname, fname) + self.skipped.append((fname, 'exception while scanning file')) + new_files_list.remove(fname) + LOG.debug(" Exception string: %s", e) + LOG.debug(" Exception traceback: %s", traceback.format_exc()) + + def _execute_ast_visitor(self, fname, data, nosec_lines): + '''Execute AST parse on each file + + :param fname: The name of the file being parsed + :param data: Original file contents + :param lines: The lines of code to process + :return: The accumulated test score + ''' + score = [] + res = b_node_visitor.BanditNodeVisitor(fname, self.b_ma, + self.b_ts, self.debug, + nosec_lines, self.metrics) + + score = res.process(data) + self.results.extend(res.tester.results) + return score + + +def _get_files_from_dir(files_dir, included_globs=None, + excluded_path_strings=None): + if not included_globs: + included_globs = ['*.py'] + if not excluded_path_strings: + excluded_path_strings = [] + + files_list = set() + excluded_files = set() + + for root, subdirs, files in os.walk(files_dir): + for filename in files: + path = os.path.join(root, filename) + if _is_file_included(path, included_globs, excluded_path_strings): + files_list.add(path) + else: + excluded_files.add(path) + + return files_list, excluded_files + + +def _is_file_included(path, included_globs, excluded_path_strings, + enforce_glob=True): + '''Determine if a file should be included based on filename + + This utility function determines if a file should be included based + on the file name, a list of parsed extensions, excluded paths, and a flag + specifying whether extensions should be enforced. + + :param path: Full path of file to check + :param parsed_extensions: List of parsed extensions + :param excluded_paths: List of paths from which we should not include files + :param enforce_glob: Can set to false to bypass extension check + :return: Boolean indicating whether a file should be included + ''' + return_value = False + + # if this is matches a glob of files we look at, and it isn't in an + # excluded path + if _matches_glob_list(path, included_globs) or not enforce_glob: + if not any(x in path for x in excluded_path_strings): + return_value = True + + return return_value + + +def _matches_glob_list(filename, glob_list): + for glob in glob_list: + if fnmatch.fnmatch(filename, glob): + return True + return False + + +def _compare_baseline_results(baseline, results): + """Compare a baseline list of issues to list of results + + This function compares a baseline set of issues to a current set of issues + to find results that weren't present in the baseline. + + :param baseline: Baseline list of issues + :param results: Current list of issues + :return: List of unmatched issues + """ + return [a for a in results if a not in baseline] + + +def _find_candidate_matches(unmatched_issues, results_list): + """Returns a dictionary with issue candidates + + For example, let's say we find a new command injection issue in a file + which used to have two. Bandit can't tell which of the command injection + issues in the file are new, so it will show all three. The user should + be able to pick out the new one. 
+ + :param unmatched_issues: List of issues that weren't present before + :param results_list: Master list of current Bandit findings + :return: A dictionary with a list of candidates for each issue + """ + + issue_candidates = collections.OrderedDict() + + for unmatched in unmatched_issues: + issue_candidates[unmatched] = ([i for i in results_list if + unmatched == i]) + + return issue_candidates diff --git a/env_27/lib/python2.7/site-packages/bandit/core/meta_ast.py b/env_27/lib/python2.7/site-packages/bandit/core/meta_ast.py new file mode 100644 index 0000000..d9654b7 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/meta_ast.py @@ -0,0 +1,57 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import collections +import logging + + +LOG = logging.getLogger(__name__) + + +class BanditMetaAst(object): + + nodes = collections.OrderedDict() + + def __init__(self): + pass + + def add_node(self, node, parent_id, depth): + '''Add a node to the AST node collection + + :param node: The AST node to add + :param parent_id: The ID of the node's parent + :param depth: The depth of the node + :return: - + ''' + node_id = hex(id(node)) + LOG.debug('adding node : %s [%s]', node_id, depth) + self.nodes[node_id] = { + 'raw': node, 'parent_id': parent_id, 'depth': depth + } + + def __str__(self): + '''Dumps a listing of all of the nodes + + Dumps a listing of all of the nodes for debugging purposes + :return: - + ''' + tmpstr = "" + for k, v in self.nodes.items(): + tmpstr += "Node: %s\n" % k + tmpstr += "\t%s\n" % str(v) + tmpstr += "Length: %s\n" % len(self.nodes) + return tmpstr diff --git a/env_27/lib/python2.7/site-packages/bandit/core/metrics.py b/env_27/lib/python2.7/site-packages/bandit/core/metrics.py new file mode 100644 index 0000000..aed616e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/metrics.py @@ -0,0 +1,103 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from bandit.core import constants + + +class Metrics(object): + """Bandit metric gathering. + + This class is a singleton used to gather and process metrics collected when + processing a code base with bandit. Metric collection is stateful, that + is, an active metric block will be set when requested and all subsequent + operations will effect that metric block until it is replaced by a setting + a new one. 
+ """ + + def __init__(self): + self.data = dict() + self.data['_totals'] = {'loc': 0, 'nosec': 0} + + # initialize 0 totals for criteria and rank; this will be reset later + for rank in constants.RANKING: + for criteria in constants.CRITERIA: + self.data['_totals']['{0}.{1}'.format(criteria[0], rank)] = 0 + + def begin(self, fname): + """Begin a new metric block. + + This starts a new metric collection name "fname" and makes is active. + + :param fname: the metrics unique name, normally the file name. + """ + self.data[fname] = {'loc': 0, 'nosec': 0} + self.current = self.data[fname] + + def note_nosec(self, num=1): + """Note a "nosec" commnet. + + Increment the currently active metrics nosec count. + + :param num: number of nosecs seen, defaults to 1 + """ + self.current['nosec'] += num + + def count_locs(self, lines): + """Count lines of code. + + We count lines that are not empty and are not comments. The result is + added to our currently active metrics loc count (normally this is 0). + + :param lines: lines in the file to process + """ + def proc(line): + tmp = line.strip() + return bool(tmp and not tmp.startswith(b'#')) + + self.current['loc'] += sum(proc(line) for line in lines) + + def count_issues(self, scores): + self.current.update(self._get_issue_counts(scores)) + + def aggregate(self): + """Do final aggregation of metrics.""" + c = collections.Counter() + for fname in self.data: + c.update(self.data[fname]) + self.data['_totals'] = dict(c) + + @staticmethod + def _get_issue_counts(scores): + """Get issue counts aggregated by confidence/severity rankings. + + :param scores: list of scores to aggregate / count + :return: aggregated total (count) of issues identified + """ + issue_counts = {} + for score in scores: + for (criteria, default) in constants.CRITERIA: + for i, rank in enumerate(constants.RANKING): + label = '{0}.{1}'.format(criteria, rank) + if label not in issue_counts: + issue_counts[label] = 0 + count = ( + score[criteria][i] / + constants.RANKING_VALUES[rank] + ) + issue_counts[label] += count + return issue_counts diff --git a/env_27/lib/python2.7/site-packages/bandit/core/node_visitor.py b/env_27/lib/python2.7/site-packages/bandit/core/node_visitor.py new file mode 100644 index 0000000..b9c51eb --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/core/node_visitor.py @@ -0,0 +1,279 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ast +import logging +import operator + +from bandit.core import constants +from bandit.core import tester as b_tester +from bandit.core import utils as b_utils + + +LOG = logging.getLogger(__name__) + + +class BanditNodeVisitor(object): + def __init__(self, fname, metaast, testset, + debug, nosec_lines, metrics): + self.debug = debug + self.nosec_lines = nosec_lines + self.seen = 0 + self.scores = { + 'SEVERITY': [0] * len(constants.RANKING), + 'CONFIDENCE': [0] * len(constants.RANKING) + } + self.depth = 0 + self.fname = fname + self.metaast = metaast + self.testset = testset + self.imports = set() + self.import_aliases = {} + self.tester = b_tester.BanditTester( + self.testset, self.debug, nosec_lines) + + # in some cases we can't determine a qualified name + try: + self.namespace = b_utils.get_module_qualname_from_path(fname) + except b_utils.InvalidModulePath: + LOG.info('Unable to find qualified name for module: %s', + self.fname) + self.namespace = "" + LOG.debug('Module qualified name: %s', self.namespace) + self.metrics = metrics + + def visit_ClassDef(self, node): + '''Visitor for AST ClassDef node + + Add class name to current namespace for all descendants. + :param node: Node being inspected + :return: - + ''' + # For all child nodes, add this class name to current namespace + self.namespace = b_utils.namespace_path_join(self.namespace, node.name) + + def visit_FunctionDef(self, node): + '''Visitor for AST FunctionDef nodes + + add relevant information about the node to + the context for use in tests which inspect function definitions. + Add the function name to the current namespace for all descendants. + :param node: The node that is being inspected + :return: - + ''' + + self.context['function'] = node + qualname = self.namespace + '.' + b_utils.get_func_name(node) + name = qualname.split('.')[-1] + + self.context['qualname'] = qualname + self.context['name'] = name + + # For all child nodes and any tests run, add this function name to + # current namespace + self.namespace = b_utils.namespace_path_join(self.namespace, name) + self.update_scores(self.tester.run_tests(self.context, 'FunctionDef')) + + def visit_Call(self, node): + '''Visitor for AST Call nodes + + add relevant information about the node to + the context for use in tests which inspect function calls. + :param node: The node that is being inspected + :return: - + ''' + + self.context['call'] = node + qualname = b_utils.get_call_name(node, self.import_aliases) + name = qualname.split('.')[-1] + + self.context['qualname'] = qualname + self.context['name'] = name + + self.update_scores(self.tester.run_tests(self.context, 'Call')) + + def visit_Import(self, node): + '''Visitor for AST Import nodes + + add relevant information about node to + the context for use in tests which inspect imports. + :param node: The node that is being inspected + :return: - + ''' + for nodename in node.names: + if nodename.asname: + self.import_aliases[nodename.asname] = nodename.name + self.imports.add(nodename.name) + self.context['module'] = nodename.name + self.update_scores(self.tester.run_tests(self.context, 'Import')) + + def visit_ImportFrom(self, node): + '''Visitor for AST ImportFrom nodes + + add relevant information about node to + the context for use in tests which inspect imports. 
+ :param node: The node that is being inspected + :return: - + ''' + module = node.module + if module is None: + return self.visit_Import(node) + + for nodename in node.names: + # TODO(ljfisher) Names in import_aliases could be overridden + # by local definitions. If this occurs bandit will see the + # name in import_aliases instead of the local definition. + # We need better tracking of names. + if nodename.asname: + self.import_aliases[nodename.asname] = ( + module + "." + nodename.name + ) + else: + # Even if import is not aliased we need an entry that maps + # name to module.name. For example, with 'from a import b' + # b should be aliased to the qualified name a.b + self.import_aliases[nodename.name] = (module + '.' + + nodename.name) + self.imports.add(module + "." + nodename.name) + self.context['module'] = module + self.context['name'] = nodename.name + self.update_scores(self.tester.run_tests(self.context, 'ImportFrom')) + + def visit_Str(self, node): + '''Visitor for AST String nodes + + add relevant information about node to + the context for use in tests which inspect strings. + :param node: The node that is being inspected + :return: - + ''' + self.context['str'] = node.s + if not isinstance(node.parent, ast.Expr): # docstring + self.context['linerange'] = b_utils.linerange_fix(node.parent) + self.update_scores(self.tester.run_tests(self.context, 'Str')) + + def visit_Bytes(self, node): + '''Visitor for AST Bytes nodes + + add relevant information about node to + the context for use in tests which inspect strings. + :param node: The node that is being inspected + :return: - + ''' + self.context['bytes'] = node.s + if not isinstance(node.parent, ast.Expr): # docstring + self.context['linerange'] = b_utils.linerange_fix(node.parent) + self.update_scores(self.tester.run_tests(self.context, 'Bytes')) + + def pre_visit(self, node): + self.context = {} + self.context['imports'] = self.imports + self.context['import_aliases'] = self.import_aliases + + if self.debug: + LOG.debug(ast.dump(node)) + self.metaast.add_node(node, '', self.depth) + + if hasattr(node, 'lineno'): + self.context['lineno'] = node.lineno + + if node.lineno in self.nosec_lines: + LOG.debug("skipped, nosec") + self.metrics.note_nosec() + return False + + self.context['node'] = node + self.context['linerange'] = b_utils.linerange_fix(node) + self.context['filename'] = self.fname + + self.seen += 1 + LOG.debug("entering: %s %s [%s]", hex(id(node)), type(node), + self.depth) + self.depth += 1 + LOG.debug(self.context) + return True + + def visit(self, node): + name = node.__class__.__name__ + method = 'visit_' + name + visitor = getattr(self, method, None) + if visitor is not None: + if self.debug: + LOG.debug("%s called (%s)", method, ast.dump(node)) + visitor(node) + else: + self.update_scores(self.tester.run_tests(self.context, name)) + + def post_visit(self, node): + self.depth -= 1 + LOG.debug("%s\texiting : %s", self.depth, hex(id(node))) + + # HACK(tkelsey): this is needed to clean up post-recursion stuff that + # gets setup in the visit methods for these node types. 
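+        # (added note, illustrative) visit_ClassDef and visit_FunctionDef
+        # push node.name onto self.namespace; popping one component here
+        # turns e.g. 'pkg.mod.Klass' back into 'pkg.mod' once the class
+        # body has been fully visited.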
+        if isinstance(node, ast.FunctionDef) or isinstance(node, ast.ClassDef):
+            self.namespace = b_utils.namespace_path_split(self.namespace)[0]
+
+    def generic_visit(self, node):
+        """Drive the visitor."""
+        for _, value in ast.iter_fields(node):
+            if isinstance(value, list):
+                max_idx = len(value) - 1
+                for idx, item in enumerate(value):
+                    if isinstance(item, ast.AST):
+                        if idx < max_idx:
+                            setattr(item, 'sibling', value[idx + 1])
+                        else:
+                            setattr(item, 'sibling', None)
+                        setattr(item, 'parent', node)
+
+                        if self.pre_visit(item):
+                            self.visit(item)
+                            self.generic_visit(item)
+                            self.post_visit(item)
+
+            elif isinstance(value, ast.AST):
+                setattr(value, 'sibling', None)
+                setattr(value, 'parent', node)
+
+                if self.pre_visit(value):
+                    self.visit(value)
+                    self.generic_visit(value)
+                    self.post_visit(value)
+
+    def update_scores(self, scores):
+        '''Score updater
+
+        Since we moved from a single score value to a map of scores per
+        severity, this is needed to update the stored list.
+        :param scores: The score list to update our scores with
+        '''
+        # we'll end up with something like:
+        # SEVERITY: {0, 0, 0, 10} where 10 is weighted by finding and level
+        for score_type in self.scores:
+            self.scores[score_type] = list(map(
+                operator.add, self.scores[score_type], scores[score_type]
+            ))
+
+    def process(self, data):
+        '''Main process loop
+
+        Build and process the AST
+        :param data: the source code to process
+        :return score: the aggregated score for the current file
+        '''
+        f_ast = ast.parse(data)
+        self.generic_visit(f_ast)
+        return self.scores
diff --git a/env_27/lib/python2.7/site-packages/bandit/core/test_properties.py b/env_27/lib/python2.7/site-packages/bandit/core/test_properties.py
new file mode 100644
index 0000000..cb37c7c
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/core/test_properties.py
@@ -0,0 +1,85 @@
+# -*- coding:utf-8 -*-
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from bandit.core import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def checks(*args):
+    '''Decorator function to set checks to be run.'''
+    def wrapper(func):
+        if not hasattr(func, "_checks"):
+            func._checks = []
+        func._checks.extend(utils.check_ast_node(a) for a in args)
+
+        LOG.debug('checks() decorator executed')
+        LOG.debug('  func._checks: %s', func._checks)
+        return func
+    return wrapper
+
+
+def takes_config(*args):
+    '''Test function takes config
+
+    Use of this decorator before a test function indicates that it should be
+    passed data from the config file. Passing a name parameter allows
+    aliasing tests and thus sharing config options.
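+
+    Both forms are accepted (illustrative sketch)::
+
+        @takes_config              # config key defaults to the function name
+        def check_foo(context, config):
+            ...
+
+        @takes_config('shared')    # several tests can share one config key
+        def check_bar(context, config):
+            ...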
+    '''
+    name = ""
+
+    def _takes_config(func):
+        if not hasattr(func, "_takes_config"):
+            func._takes_config = name
+        return func
+
+    if len(args) == 1 and callable(args[0]):
+        name = args[0].__name__
+        return _takes_config(args[0])
+    else:
+        name = args[0]
+        return _takes_config
+
+
+def test_id(id_val):
+    '''Test function identifier
+
+    Use of this decorator before a test function sets its simple ID
+    '''
+    def _has_id(func):
+        if not hasattr(func, "_test_id"):
+            func._test_id = id_val
+        return func
+    return _has_id
+
+
+def accepts_baseline(*args):
+    """Decorator to indicate formatter accepts baseline results
+
+    Use of this decorator before a formatter indicates that it is able to deal
+    with baseline results. Specifically this means it has a way to display
+    candidate results and know when it should do so.
+    """
+    def wrapper(func):
+        if not hasattr(func, '_accepts_baseline'):
+            func._accepts_baseline = True
+
+        LOG.debug('accepts_baseline() decorator executed on %s', func.__name__)
+
+        return func
+    return wrapper(args[0])
diff --git a/env_27/lib/python2.7/site-packages/bandit/core/test_set.py b/env_27/lib/python2.7/site-packages/bandit/core/test_set.py
new file mode 100644
index 0000000..b2213e8
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/core/test_set.py
@@ -0,0 +1,123 @@
+# -*- coding:utf-8 -*-
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ + +import importlib +import logging + + +from bandit.core import blacklisting +from bandit.core import extension_loader + + +LOG = logging.getLogger(__name__) + + +class BanditTestSet(object): + def __init__(self, config, profile=None): + if not profile: + profile = {} + extman = extension_loader.MANAGER + filtering = self._get_filter(config, profile) + self.plugins = [p for p in extman.plugins + if p.plugin._test_id in filtering] + self.plugins.extend(self._load_builtins(filtering, profile)) + self._load_tests(config, self.plugins) + + @staticmethod + def _get_filter(config, profile): + extman = extension_loader.MANAGER + + inc = set(profile.get('include', [])) + exc = set(profile.get('exclude', [])) + + all_blacklist_tests = set() + for _node, tests in extman.blacklist.items(): + all_blacklist_tests.update(t['id'] for t in tests) + + # this block is purely for backwards compatibility, the rules are as + # follows: + # B001,B401 means B401 + # B401 means B401 + # B001 means all blacklist tests + if 'B001' in inc: + if not inc.intersection(all_blacklist_tests): + inc.update(all_blacklist_tests) + inc.discard('B001') + if 'B001' in exc: + if not exc.intersection(all_blacklist_tests): + exc.update(all_blacklist_tests) + exc.discard('B001') + + if inc: + filtered = inc + else: + filtered = set(extman.plugins_by_id.keys()) + filtered.update(extman.builtin) + filtered.update(all_blacklist_tests) + return filtered - exc + + def _load_builtins(self, filtering, profile): + '''loads up builtin functions, so they can be filtered.''' + + class Wrapper(object): + def __init__(self, name, plugin): + self.name = name + self.plugin = plugin + + extman = extension_loader.MANAGER + blacklist = profile.get('blacklist') + if not blacklist: # not overridden by legacy data + blacklist = {} + for node, tests in extman.blacklist.items(): + values = [t for t in tests if t['id'] in filtering] + if values: + blacklist[node] = values + + if not blacklist: + return [] + + # this dresses up the blacklist to look like a plugin, but + # the '_checks' data comes from the blacklist information. + # the '_config' is the filtered blacklist data set. + setattr(blacklisting.blacklist, "_test_id", 'B001') + setattr(blacklisting.blacklist, "_checks", blacklist.keys()) + setattr(blacklisting.blacklist, "_config", blacklist) + return [Wrapper('blacklist', blacklisting.blacklist)] + + def _load_tests(self, config, plugins): + '''Builds a dict mapping tests to node types.''' + self.tests = {} + for plugin in plugins: + if hasattr(plugin.plugin, '_takes_config'): + # TODO(??): config could come from profile ... 
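+                # (added note, illustrative) resolution order as implemented
+                # below: an explicit value from the bandit config file wins;
+                # otherwise fall back to the plugin module's own gen_config()
+                # defaults.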
+                cfg = config.get_option(plugin.plugin._takes_config)
+                if cfg is None:
+                    genner = importlib.import_module(plugin.plugin.__module__)
+                    cfg = genner.gen_config(plugin.plugin._takes_config)
+                plugin.plugin._config = cfg
+            for check in plugin.plugin._checks:
+                self.tests.setdefault(check, []).append(plugin.plugin)
+                LOG.debug('added function %s (%s) targeting %s',
+                          plugin.name, plugin.plugin._test_id, check)
+
+    def get_tests(self, checktype):
+        '''Returns all tests that are of type checktype
+
+        :param checktype: The type of test to filter on
+        :return: A list of tests which are of the specified type
+        '''
+        return self.tests.get(checktype) or []
diff --git a/env_27/lib/python2.7/site-packages/bandit/core/tester.py b/env_27/lib/python2.7/site-packages/bandit/core/tester.py
new file mode 100644
index 0000000..562766e
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/core/tester.py
@@ -0,0 +1,111 @@
+# -*- coding:utf-8 -*-
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import logging
+import warnings
+
+from bandit.core import constants
+from bandit.core import context as b_context
+from bandit.core import utils
+
+warnings.formatwarning = utils.warnings_formatter
+LOG = logging.getLogger(__name__)
+
+
+class BanditTester(object):
+    def __init__(self, testset, debug, nosec_lines):
+        self.results = []
+        self.testset = testset
+        self.last_result = None
+        self.debug = debug
+        self.nosec_lines = nosec_lines
+
+    def run_tests(self, raw_context, checktype):
+        '''Runs all tests for a certain type of check
+
+        Runs all tests registered for the given check type (for example
+        'FunctionDef') and stores the results in self.results.
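+
+        A typical call made from the node visitor looks like this
+        (illustrative values)::
+
+            scores = tester.run_tests(context, 'Call')
+            # scores -> {'SEVERITY': [0, 0, 10, 0],
+            #            'CONFIDENCE': [0, 0, 0, 10]}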
+
+        :param raw_context: Raw context dictionary
+        :param checktype: The type of checks to run
+        :return: a score based on the number and type of test results
+        '''
+
+        scores = {
+            'SEVERITY': [0] * len(constants.RANKING),
+            'CONFIDENCE': [0] * len(constants.RANKING)
+        }
+
+        tests = self.testset.get_tests(checktype)
+        for test in tests:
+            name = test.__name__
+            # execute test with an instance of the context class
+            temp_context = copy.copy(raw_context)
+            context = b_context.Context(temp_context)
+            try:
+                if hasattr(test, '_config'):
+                    result = test(context, test._config)
+                else:
+                    result = test(context)
+
+                # if we have a result, record it and update scores
+                if (result is not None and
+                        result.lineno not in self.nosec_lines and
+                        temp_context['lineno'] not in self.nosec_lines):
+
+                    if isinstance(temp_context['filename'], bytes):
+                        result.fname = temp_context['filename'].decode('utf-8')
+                    else:
+                        result.fname = temp_context['filename']
+
+                    if result.lineno is None:
+                        result.lineno = temp_context['lineno']
+                    result.linerange = temp_context['linerange']
+                    result.test = name
+                    if result.test_id == "":
+                        result.test_id = test._test_id
+
+                    self.results.append(result)
+
+                    LOG.debug("Issue identified by %s: %s", name, result)
+                    sev = constants.RANKING.index(result.severity)
+                    val = constants.RANKING_VALUES[result.severity]
+                    scores['SEVERITY'][sev] += val
+                    con = constants.RANKING.index(result.confidence)
+                    val = constants.RANKING_VALUES[result.confidence]
+                    scores['CONFIDENCE'][con] += val
+
+            except Exception as e:
+                self.report_error(name, context, e)
+                if self.debug:
+                    raise
+        LOG.debug("Returning scores: %s", scores)
+        return scores
+
+    @staticmethod
+    def report_error(test, context, error):
+        what = "Bandit internal error running: "
+        what += "%s " % test
+        what += "on file %s at line %i: " % (
+            context._context['filename'],
+            context._context['lineno']
+        )
+        what += str(error)
+        import traceback
+        what += traceback.format_exc()
+        LOG.error(what)
diff --git a/env_27/lib/python2.7/site-packages/bandit/core/utils.py b/env_27/lib/python2.7/site-packages/bandit/core/utils.py
new file mode 100644
index 0000000..a16f564
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/core/utils.py
@@ -0,0 +1,336 @@
+# -*- coding:utf-8 -*-
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import _ast
+import ast
+import logging
+import os.path
+import sys
+
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+
+LOG = logging.getLogger(__name__)
+
+
+"""Various helper functions."""
+
+
+def _get_attr_qual_name(node, aliases):
+    '''Get the full name for the attribute node.
+
+    This will resolve a pseudo-qualified name for the attribute
+    rooted at node as long as all the deeper nodes are Names or
+    Attributes. This will give you how the code referenced the name but
+    will not tell you what the name actually refers to.
If we + encounter a node without a static name we punt with an + empty string. If this encounters something more complex, such as + foo.mylist[0](a,b) we just return empty string. + + :param node: AST Name or Attribute node + :param aliases: Import aliases dictionary + :returns: Qualified name referred to by the attribute or name. + ''' + if isinstance(node, _ast.Name): + if node.id in aliases: + return aliases[node.id] + return node.id + elif isinstance(node, _ast.Attribute): + name = '%s.%s' % (_get_attr_qual_name(node.value, aliases), node.attr) + if name in aliases: + return aliases[name] + return name + else: + return "" + + +def get_call_name(node, aliases): + if isinstance(node.func, _ast.Name): + if deepgetattr(node, 'func.id') in aliases: + return aliases[deepgetattr(node, 'func.id')] + return deepgetattr(node, 'func.id') + elif isinstance(node.func, _ast.Attribute): + return _get_attr_qual_name(node.func, aliases) + else: + return "" + + +def get_func_name(node): + return node.name # TODO(tkelsey): get that qualname using enclosing scope + + +def get_qual_attr(node, aliases): + prefix = "" + if isinstance(node, _ast.Attribute): + try: + val = deepgetattr(node, 'value.id') + if val in aliases: + prefix = aliases[val] + else: + prefix = deepgetattr(node, 'value.id') + except Exception: + # NOTE(tkelsey): degrade gracefully when we can't get the fully + # qualified name for an attr, just return its base name. + pass + + return "%s.%s" % (prefix, node.attr) + else: + return "" # TODO(tkelsey): process other node types + + +def deepgetattr(obj, attr): + """Recurses through an attribute chain to get the ultimate value.""" + for key in attr.split('.'): + obj = getattr(obj, key) + return obj + + +class InvalidModulePath(Exception): + pass + + +class ConfigError(Exception): + """Raised when the config file fails validation.""" + def __init__(self, message, config_file): + self.config_file = config_file + self.message = "{0} : {1}".format(config_file, message) + super(ConfigError, self).__init__(self.message) + + +class ProfileNotFound(Exception): + """Raised when chosen profile cannot be found.""" + def __init__(self, config_file, profile): + self.config_file = config_file + self.profile = profile + message = 'Unable to find profile (%s) in config file: %s' % ( + self.profile, self.config_file) + super(ProfileNotFound, self).__init__(message) + + +def warnings_formatter(message, category=UserWarning, filename='', lineno=-1, + line=''): + '''Monkey patch for warnings.warn to suppress cruft output.''' + return "{0}\n".format(message) + + +def get_module_qualname_from_path(path): + '''Get the module's qualified name by analysis of the path. + + Resolve the absolute pathname and eliminate symlinks. This could result in + an incorrect name if symlinks are used to restructure the python lib + directory. + + Starting from the right-most directory component look for __init__.py in + the directory component. If it exists then the directory name is part of + the module name. Move left to the subsequent directory components until a + directory is found without __init__.py. + + :param: Path to module file. Relative paths will be resolved relative to + current working directory. 
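+    For example (illustrative): with pkg/__init__.py present on disk, the
+    path pkg/mod.py yields the qualified name 'pkg.mod'.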
+    :return: fully qualified module name
+    '''
+
+    (head, tail) = os.path.split(path)
+    if head == '' or tail == '':
+        raise InvalidModulePath('Invalid python file path: "%s"'
+                                ' Missing path or file name' % (path))
+
+    qname = [os.path.splitext(tail)[0]]
+    while head not in ['/', '.', '']:
+        if os.path.isfile(os.path.join(head, '__init__.py')):
+            (head, tail) = os.path.split(head)
+            qname.insert(0, tail)
+        else:
+            break
+
+    qualname = '.'.join(qname)
+    return qualname
+
+
+def namespace_path_join(base, name):
+    '''Extend the current namespace path with an additional name
+
+    Take a namespace path (i.e., package.module.class) and extend it
+    with an additional name (i.e., package.module.class.subclass).
+    This is similar to how os.path.join works.
+
+    :param base: (String) The base namespace path.
+    :param name: (String) The new name to append to the base path.
+    :returns: (String) A new namespace path resulting from combination of
+        base and name.
+    '''
+    return '%s.%s' % (base, name)
+
+
+def namespace_path_split(path):
+    '''Split the namespace path into a pair (head, tail).
+
+    Tail will be the last namespace path component and head will
+    be everything leading up to that in the path. This is similar to
+    os.path.split.
+
+    :param path: (String) A namespace path.
+    :returns: (String, String) A tuple where the first component is the base
+        path and the second is the last path component.
+    '''
+    return tuple(path.rsplit('.', 1))
+
+
+def escaped_bytes_representation(b):
+    '''PY3 bytes need escaping for comparison with other strings.
+
+    In practice it turns control characters into acceptable codepoints then
+    encodes them into bytes again to turn unprintable bytes into printable
+    escape sequences.
+
+    This is safe to do for the whole range 0..255 and result matches
+    unicode_escape on a unicode string.
+    '''
+    return b.decode('unicode_escape').encode('unicode_escape')
+
+
+def linerange(node):
+    """Get line number range from a node."""
+    strip = {"body": None, "orelse": None,
+             "handlers": None, "finalbody": None}
+    for key in strip.keys():
+        if hasattr(node, key):
+            strip[key] = getattr(node, key)
+            setattr(node, key, [])
+
+    lines_min = 9999999999
+    lines_max = -1
+    for n in ast.walk(node):
+        if hasattr(n, 'lineno'):
+            lines_min = min(lines_min, n.lineno)
+            lines_max = max(lines_max, n.lineno)
+
+    for key in strip.keys():
+        if strip[key] is not None:
+            setattr(node, key, strip[key])
+
+    if lines_max > -1:
+        return list(range(lines_min, lines_max + 1))
+    return [0, 1]
+
+
+def linerange_fix(node):
+    """Try and work around a known Python bug with multi-line strings."""
+    # deal with multiline strings lineno behavior (Python issue #16806)
+    lines = linerange(node)
+    if hasattr(node, 'sibling') and hasattr(node.sibling, 'lineno'):
+        start = min(lines)
+        delta = node.sibling.lineno - start
+        if delta > 1:
+            return list(range(start, node.sibling.lineno))
+    return lines
+
+
+def concat_string(node, stop=None):
+    '''Builds a string from an ast.BinOp chain.
+
+    This will build a string from a series of ast.Str nodes wrapped in
+    ast.BinOp nodes. Something like "a" + "b" + "c" or "a %s" % val etc.
+    The provided node can be any participant in the BinOp chain.
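+
+    Illustrative: for the expression "a" + "b" + "c", passing any of the
+    three Str nodes returns the outermost BinOp together with the joined
+    string value "a b c".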
+
+    :param node: (ast.Str or ast.BinOp) The node to process
+    :param stop: (ast.Str or ast.BinOp) Optional base node to stop at
+    :returns: (Tuple) the root node of the expression, the string value
+    '''
+    def _get(node, bits, stop=None):
+        if node != stop:
+            bits.append(
+                _get(node.left, bits, stop)
+                if isinstance(node.left, ast.BinOp)
+                else node.left)
+            bits.append(
+                _get(node.right, bits, stop)
+                if isinstance(node.right, ast.BinOp)
+                else node.right)
+
+    bits = [node]
+    while isinstance(node.parent, ast.BinOp):
+        node = node.parent
+    if isinstance(node, ast.BinOp):
+        _get(node, bits, stop)
+    return (node, " ".join([x.s for x in bits if isinstance(x, ast.Str)]))
+
+
+def get_called_name(node):
+    '''Get a function name from an ast.Call node.
+
+    An ast.Call node representing a method call will present differently to
+    one wrapping a function call: thing.call() vs call(). This helper will
+    grab the unqualified call name correctly in either case.
+
+    :param node: (ast.Call) the call node
+    :returns: (String) the function name
+    '''
+    func = node.func
+    try:
+        return func.attr if isinstance(func, ast.Attribute) else func.id
+    except AttributeError:
+        return ""
+
+
+def get_path_for_function(f):
+    '''Get the path of the file where the function is defined.
+
+    :returns: the path, or None if one could not be found or f is not a real
+        function
+    '''
+
+    if hasattr(f, "__module__"):
+        module_name = f.__module__
+    elif hasattr(f, "im_func"):
+        module_name = f.im_func.__module__
+    else:
+        LOG.warning("Cannot resolve file where %s is defined", f)
+        return None
+
+    module = sys.modules[module_name]
+    if hasattr(module, "__file__"):
+        return module.__file__
+    else:
+        LOG.warning("Cannot resolve file path for module %s", module_name)
+        return None
+
+
+def parse_ini_file(f_loc):
+    config = configparser.ConfigParser()
+    try:
+        config.read(f_loc)
+        return {k: v for k, v in config.items('bandit')}
+
+    except (configparser.Error, KeyError, TypeError):
+        LOG.warning("Unable to parse config file %s or missing [bandit] "
+                    "section", f_loc)
+
+    return None
+
+
+def check_ast_node(name):
+    'Check if the given name is that of a valid AST node.'
+    try:
+        node = getattr(ast, name)
+        if issubclass(node, ast.AST):
+            return name
+    except AttributeError:  # nosec(tkelsey): catching expected exception
+        pass
+
+    raise TypeError("Error: %s is not a valid node type in AST" % name)
diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/__init__.py b/env_27/lib/python2.7/site-packages/bandit/formatters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/csv.py b/env_27/lib/python2.7/site-packages/bandit/formatters/csv.py
new file mode 100644
index 0000000..11fd1cc
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/formatters/csv.py
@@ -0,0 +1,84 @@
+# -*- coding:utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +r""" +============= +CSV Formatter +============= + +This formatter outputs the issues in a comma separated values format. + +:Example: + +.. code-block:: none + + filename,test_name,test_id,issue_severity,issue_confidence,issue_text, + line_number,line_range,more_info + examples/yaml_load.py,blacklist_calls,B301,MEDIUM,HIGH,"Use of unsafe yaml + load. Allows instantiation of arbitrary objects. Consider yaml.safe_load(). + ",5,[5],https://bandit.readthedocs.io/en/latest/ + +.. versionadded:: 0.11.0 + +.. versionchanged:: 1.5.0 + New field `more_info` added to output + +""" +# Necessary for this formatter to work when imported on Python 2. Importing +# the standard library's csv module conflicts with the name of this module. +from __future__ import absolute_import + +import csv +import logging +import sys + +from bandit.core import docs_utils + +LOG = logging.getLogger(__name__) + + +def report(manager, fileobj, sev_level, conf_level, lines=-1): + '''Prints issues in CSV format + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + ''' + + results = manager.get_issue_list(sev_level=sev_level, + conf_level=conf_level) + + with fileobj: + fieldnames = ['filename', + 'test_name', + 'test_id', + 'issue_severity', + 'issue_confidence', + 'issue_text', + 'line_number', + 'line_range', + 'more_info'] + + writer = csv.DictWriter(fileobj, fieldnames=fieldnames, + extrasaction='ignore') + writer.writeheader() + for result in results: + r = result.as_dict(with_code=False) + r['more_info'] = docs_utils.get_url(r['test_id']) + writer.writerow(r) + + if fileobj.name != sys.stdout.name: + LOG.info("CSV output written to file: %s", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/custom.py b/env_27/lib/python2.7/site-packages/bandit/formatters/custom.py new file mode 100644 index 0000000..864ff4f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/custom.py @@ -0,0 +1,163 @@ +# Copyright (c) 2017 Hewlett Packard Enterprise +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +================ +Custom Formatter +================ + +This formatter outputs the issues in custom machine-readable format. + +default template: {abspath}:{line}: {test_id}[bandit]: {severity}: {msg} + +:Example: + +/usr/lib/python3.6/site-packages/openlp/core/utils/__init__.py: \ +405: B310[bandit]: MEDIUM: Audit url open for permitted schemes. \ +Allowing use of file:/ or custom schemes is often unexpected. 
+ +""" + +import logging +import os +import re +import string +import sys + +from bandit.core import test_properties + + +LOG = logging.getLogger(__name__) + + +class SafeMapper(dict): + """Safe mapper to handle format key errors""" + @classmethod # To prevent PEP8 warnings in the test suite + def __missing__(cls, key): + return "{%s}" % key + + +@test_properties.accepts_baseline +def report(manager, fileobj, sev_level, conf_level, lines=-1, template=None): + """Prints issues in custom format + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + :param template: Output template with non-terminal tags + (default: '{abspath}:{line}: + {test_id}[bandit]: {severity}: {msg}') + """ + + machine_output = {'results': [], 'errors': []} + for (fname, reason) in manager.get_skipped(): + machine_output['errors'].append({'filename': fname, + 'reason': reason}) + + results = manager.get_issue_list(sev_level=sev_level, + conf_level=conf_level) + + msg_template = template + if template is None: + msg_template = "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}" + + # Dictionary of non-terminal tags that will be expanded + tag_mapper = { + 'abspath': lambda issue: os.path.abspath(issue.fname), + 'relpath': lambda issue: os.path.relpath(issue.fname), + 'line': lambda issue: issue.lineno, + 'test_id': lambda issue: issue.test_id, + 'severity': lambda issue: issue.severity, + 'msg': lambda issue: issue.text, + 'confidence': lambda issue: issue.confidence, + 'range': lambda issue: issue.linerange + } + + # Create dictionary with tag sets to speed up search for similar tags + tag_sim_dict = dict( + [(tag, set(tag)) for tag, _ in tag_mapper.items()] + ) + + # Parse the format_string template and check the validity of tags + try: + parsed_template_orig = list(string.Formatter().parse(msg_template)) + # of type (literal_text, field_name, fmt_spec, conversion) + + # Check the format validity only, ignore keys + string.Formatter().vformat(msg_template, (), SafeMapper(line=0)) + except ValueError as e: + LOG.error("Template is not in valid format: %s", e.args[0]) + sys.exit(2) + + tag_set = {t[1] for t in parsed_template_orig if t[1] is not None} + if not tag_set: + LOG.error("No tags were found in the template. 
Are you missing '{}'?")
+        sys.exit(2)
+
+    def get_similar_tag(tag):
+        similarity_list = [(len(set(tag) & t_set), t)
+                           for t, t_set in tag_sim_dict.items()]
+        return sorted(similarity_list)[-1][1]
+
+    tag_blacklist = []
+    for tag in tag_set:
+        # check if the tag is in dictionary
+        if tag not in tag_mapper:
+            similar_tag = get_similar_tag(tag)
+            LOG.warning(
+                "Tag '%s' was not recognized and will be skipped, "
+                "did you mean to use '%s'?", tag, similar_tag
+            )
+            tag_blacklist += [tag]
+
+    # Compose the message template back with the valid values only
+    msg_parsed_template_list = []
+    for literal_text, field_name, fmt_spec, conversion in parsed_template_orig:
+        if literal_text:
+            # if there is '{' or '}', double it to prevent expansion
+            literal_text = re.sub('{', '{{', literal_text)
+            literal_text = re.sub('}', '}}', literal_text)
+            msg_parsed_template_list.append(literal_text)
+
+        if field_name is not None:
+            if field_name in tag_blacklist:
+                msg_parsed_template_list.append(field_name)
+                continue
+            # Append the fmt_spec part
+            params = [field_name, fmt_spec, conversion]
+            markers = ['', ':', '!']
+            msg_parsed_template_list.append(
+                ['{'] +
+                ["%s" % (m + p) if p else ''
+                 for m, p in zip(markers, params)] +
+                ['}']
+            )
+
+    msg_parsed_template = "".join([item for lst in msg_parsed_template_list
+                                   for item in lst]) + "\n"
+    limit = lines if lines > 0 else None
+    with fileobj:
+        for defect in results[:limit]:
+            evaluated_tags = SafeMapper(
+                (k, v(defect)) for k, v in tag_mapper.items()
+            )
+            output = msg_parsed_template.format(**evaluated_tags)
+
+            fileobj.write(output)
+
+    if fileobj.name != sys.stdout.name:
+        LOG.info("Result written to file: %s", fileobj.name)
diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/html.py b/env_27/lib/python2.7/site-packages/bandit/formatters/html.py
new file mode 100644
index 0000000..e2fe78a
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/formatters/html.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2015 Rackspace, Inc.
+# Copyright (c) 2015 Hewlett Packard Enterprise
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+r"""
+==============
+HTML formatter
+==============
+
+This formatter outputs the issues as HTML.
+
+:Example:
+
+.. code-block:: html
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="UTF-8">
+    <title>
+        Bandit Report
+    </title>
+    <style>
+        /* stylesheet omitted */
+    </style>
+    </head>
+    <body>
+    <div id="metrics">
+        <div class="metrics-box">
+            <div class="metrics-title">
+                Metrics:<br>
+            </div>
+            Total lines of code: <span id="loc">9</span><br>
+            Total lines skipped (#nosec): <span id="nosec">0</span>
+        </div>
+    </div>
+
+    <br>
+    <div id="results">
+        <div id="issue-0">
+        <div class="issue-block issue-sev-medium">
+            <b>yaml_load: </b> Use of unsafe yaml load. Allows
+            instantiation of arbitrary objects. Consider yaml.safe_load().<br>
+            <b>Test ID:</b> B506<br>
+            <b>Severity: </b>MEDIUM<br>
+            <b>Confidence: </b>HIGH<br>
+            <b>File: </b><a href="examples/yaml_load.py"
+            target="_blank">examples/yaml_load.py</a><br>
+            <b>More info: </b>
+            https://bandit.readthedocs.io/en/latest/plugins/yaml_load.html
+            <br>
+            <div class="code">
+            <pre>
+    5       ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3})
+    6       y = yaml.load(ystr)
+    7       yaml.dump(y)
+            </pre>
+            </div>
+        </div>
+        </div>
+    </div>
+
+    </body>
+    </html>
+
+.. versionadded:: 0.14.0
+
+"""
+from __future__ import absolute_import
+
+import logging
+import sys
+
+import six
+
+from bandit.core import docs_utils
+from bandit.core import test_properties
+from bandit.formatters import utils
+
+if not six.PY2:
+    from html import escape as html_escape
+else:
+    from cgi import escape as html_escape
+
+LOG = logging.getLogger(__name__)
+
+
+@test_properties.accepts_baseline
+def report(manager, fileobj, sev_level, conf_level, lines=-1):
+    """Writes issues to 'fileobj' in HTML format
+
+    :param manager: the bandit manager object
+    :param fileobj: The output file object, which may be sys.stdout
+    :param sev_level: Filtering severity level
+    :param conf_level: Filtering confidence level
+    :param lines: Number of lines to report, -1 for all
+    """
+
+    header_block = u"""
+<!DOCTYPE html>
+<html>
+<head>
+
+<meta charset="UTF-8">
+
+<title>
+    Bandit Report
+</title>
+
+<style>
+    /* stylesheet omitted */
+</style>
+</head>
+"""
+
+    report_block = u"""
+<body>
+{metrics}
+{skipped}
+
+<br>
+<div id="results">
+    {results}
+</div>
+
+</body>
+</html>
+"""
+
+    issue_block = u"""
+<div id="issue-{issue_no}">
+<div class="issue-block {issue_class}">
+    <b>{test_name}: </b> {test_text}<br>
+    <b>Test ID:</b> {test_id}<br>
+    <b>Severity: </b>{severity}<br>
+    <b>Confidence: </b>{confidence}<br>
+    <b>File: </b><a href="{path}" target="_blank">{path}</a><br>
+    <b>More info: </b><a href="{url}" target="_blank">{url}</a><br>
+{code}
+{candidates}
+</div>
+</div>
+"""
+
+    code_block = u"""
+<div class="code">
+<pre>
+{code}
+</pre>
+</div>
+"""
+
+    candidate_block = u"""
+<div class="candidates">
+<br>
+<b>Candidates: </b>
+{candidate_list}
+</div>
+"""
+
+    candidate_issue = u"""
+<div class="candidate">
+<div class="code">
+<pre>
+{code}
+</pre>
+</div>
+</div>
+"""
+
+    skipped_block = u"""
+<br>
+<div class="file-list">
+<b>Skipped files:</b><br>
+{files_list}
+</div>
+"""
+
+    metrics_block = u"""
+<div id="metrics">
+<div class="metrics-box">
+<div class="metrics-title">
+    Metrics:<br>
+</div>
+    Total lines of code: <span id="loc">{loc}</span><br>
+    Total lines skipped (#nosec): <span id="nosec">{nosec}</span>
+</div>
+</div>
+
+"""
+
+    issues = manager.get_issue_list(sev_level=sev_level,
+                                    conf_level=conf_level)
+
+    baseline = not isinstance(issues, list)
+
+    # build the skipped string to insert in the report
+    skipped_str = ''.join('%s <b>reason:</b> %s<br>
' % (fname, reason) + for fname, reason in manager.get_skipped()) + if skipped_str: + skipped_text = skipped_block.format(files_list=skipped_str) + else: + skipped_text = '' + + # build the results string to insert in the report + results_str = '' + for index, issue in enumerate(issues): + if not baseline or len(issues[issue]) == 1: + candidates = '' + safe_code = html_escape(issue.get_code(lines, True). + strip('\n').lstrip(' ')) + code = code_block.format(code=safe_code) + else: + candidates_str = '' + code = '' + for candidate in issues[issue]: + candidate_code = html_escape(candidate.get_code(lines, True). + strip('\n').lstrip(' ')) + candidates_str += candidate_issue.format(code=candidate_code) + + candidates = candidate_block.format(candidate_list=candidates_str) + + url = docs_utils.get_url(issue.test_id) + results_str += issue_block.format(issue_no=index, + issue_class='issue-sev-{}'. + format(issue.severity.lower()), + test_name=issue.test, + test_id=issue.test_id, + test_text=issue.text, + severity=issue.severity, + confidence=issue.confidence, + path=issue.fname, code=code, + candidates=candidates, + url=url) + + # build the metrics string to insert in the report + metrics_summary = metrics_block.format( + loc=manager.metrics.data['_totals']['loc'], + nosec=manager.metrics.data['_totals']['nosec']) + + # build the report and output it + report_contents = report_block.format(metrics=metrics_summary, + skipped=skipped_text, + results=results_str) + + with fileobj: + wrapped_file = utils.wrap_file_object(fileobj) + wrapped_file.write(utils.convert_file_contents(header_block)) + wrapped_file.write(utils.convert_file_contents(report_contents)) + + if fileobj.name != sys.stdout.name: + LOG.info("HTML output written to file: %s", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/json.py b/env_27/lib/python2.7/site-packages/bandit/formatters/json.py new file mode 100644 index 0000000..0438e34 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/json.py @@ -0,0 +1,152 @@ +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============== +JSON formatter +============== + +This formatter outputs the issues in JSON. + +:Example: + +.. 
code-block:: javascript
+
+    {
+      "errors": [],
+      "generated_at": "2015-12-16T22:27:34Z",
+      "metrics": {
+        "_totals": {
+          "CONFIDENCE.HIGH": 1,
+          "CONFIDENCE.LOW": 0,
+          "CONFIDENCE.MEDIUM": 0,
+          "CONFIDENCE.UNDEFINED": 0,
+          "SEVERITY.HIGH": 0,
+          "SEVERITY.LOW": 0,
+          "SEVERITY.MEDIUM": 1,
+          "SEVERITY.UNDEFINED": 0,
+          "loc": 5,
+          "nosec": 0
+        },
+        "examples/yaml_load.py": {
+          "CONFIDENCE.HIGH": 1,
+          "CONFIDENCE.LOW": 0,
+          "CONFIDENCE.MEDIUM": 0,
+          "CONFIDENCE.UNDEFINED": 0,
+          "SEVERITY.HIGH": 0,
+          "SEVERITY.LOW": 0,
+          "SEVERITY.MEDIUM": 1,
+          "SEVERITY.UNDEFINED": 0,
+          "loc": 5,
+          "nosec": 0
+        }
+      },
+      "results": [
+        {
+          "code": "4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3})\n5
+          y = yaml.load(ystr)\n6 yaml.dump(y)\n",
+          "filename": "examples/yaml_load.py",
+          "issue_confidence": "HIGH",
+          "issue_severity": "MEDIUM",
+          "issue_text": "Use of unsafe yaml load. Allows instantiation of
+          arbitrary objects. Consider yaml.safe_load().\n",
+          "line_number": 5,
+          "line_range": [
+            5
+          ],
+          "more_info": "https://bandit.readthedocs.io/en/latest/",
+          "test_name": "blacklist_calls",
+          "test_id": "B301"
+        }
+      ]
+    }
+
+.. versionadded:: 0.10.0
+
+"""
+# Necessary so we can import the standard library json module while continuing
+# to name this file json.py.  (Python 2 only)
+from __future__ import absolute_import
+
+import datetime
+import json
+import logging
+import operator
+import sys
+
+from bandit.core import docs_utils
+from bandit.core import test_properties
+
+LOG = logging.getLogger(__name__)
+
+
+@test_properties.accepts_baseline
+def report(manager, fileobj, sev_level, conf_level, lines=-1):
+    '''Prints issues in JSON format
+
+    :param manager: the bandit manager object
+    :param fileobj: The output file object, which may be sys.stdout
+    :param sev_level: Filtering severity level
+    :param conf_level: Filtering confidence level
+    :param lines: Number of lines to report, -1 for all
+    '''
+
+    machine_output = {'results': [], 'errors': []}
+    for (fname, reason) in manager.get_skipped():
+        machine_output['errors'].append({'filename': fname,
+                                         'reason': reason})
+
+    results = manager.get_issue_list(sev_level=sev_level,
+                                     conf_level=conf_level)
+
+    baseline = not isinstance(results, list)
+
+    if baseline:
+        collector = []
+        for r in results:
+            d = r.as_dict()
+            d['more_info'] = docs_utils.get_url(d['test_id'])
+            if len(results[r]) > 1:
+                d['candidates'] = [c.as_dict() for c in results[r]]
+            collector.append(d)
+
+    else:
+        collector = [r.as_dict() for r in results]
+        for elem in collector:
+            elem['more_info'] = docs_utils.get_url(elem['test_id'])
+
+    itemgetter = operator.itemgetter
+    if manager.agg_type == 'vuln':
+        machine_output['results'] = sorted(collector,
+                                           key=itemgetter('test_name'))
+    else:
+        machine_output['results'] = sorted(collector,
+                                           key=itemgetter('filename'))
+
+    machine_output['metrics'] = manager.metrics.data
+
+    # timezone agnostic format
+    TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
+    time_string = datetime.datetime.utcnow().strftime(TS_FORMAT)
+    machine_output['generated_at'] = time_string
+
+    result = json.dumps(machine_output, sort_keys=True,
+                        indent=2, separators=(',', ': '))
+
+    with fileobj:
+        fileobj.write(result)
+
+    if fileobj.name != sys.stdout.name:
+        LOG.info("JSON output written to file: %s", fileobj.name)
diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/screen.py b/env_27/lib/python2.7/site-packages/bandit/formatters/screen.py
new file mode 100644
index 0000000..229c6c5
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/bandit/formatters/screen.py
@@ -0,0 
+1,187 @@ +# Copyright (c) 2015 Hewlett Packard Enterprise +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +================ +Screen formatter +================ + +This formatter outputs the issues as color coded text to screen. + +:Example: + +.. code-block:: none + + >> Issue: [B506: yaml_load] Use of unsafe yaml load. Allows + instantiation of arbitrary objects. Consider yaml.safe_load(). + + Severity: Medium Confidence: High + Location: examples/yaml_load.py:5 + More Info: https://bandit.readthedocs.io/en/latest/ + 4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3}) + 5 y = yaml.load(ystr) + 6 yaml.dump(y) + +.. versionadded:: 0.9.0 + +""" + +from __future__ import print_function + +import datetime +import logging +import sys + +from bandit.core import constants +from bandit.core import docs_utils +from bandit.core import test_properties + +LOG = logging.getLogger(__name__) + +COLOR = { + 'DEFAULT': '\033[0m', + 'HEADER': '\033[95m', + 'LOW': '\033[94m', + 'MEDIUM': '\033[93m', + 'HIGH': '\033[91m', +} + + +def header(text, *args): + return u'%s%s%s' % (COLOR['HEADER'], (text % args), COLOR['DEFAULT']) + + +def get_verbose_details(manager): + bits = [] + bits.append(header(u'Files in scope (%i):', len(manager.files_list))) + tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" + bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE'])) + for (item, score) + in zip(manager.files_list, manager.scores)]) + bits.append(header(u'Files excluded (%i):', len(manager.excluded_files))) + bits.extend([u"\t%s" % fname for fname in manager.excluded_files]) + return '\n'.join([str(bit) for bit in bits]) + + +def get_metrics(manager): + bits = [] + bits.append(header("\nRun metrics:")) + for (criteria, default) in constants.CRITERIA: + bits.append("\tTotal issues (by %s):" % (criteria.lower())) + for rank in constants.RANKING: + bits.append("\t\t%s: %s" % ( + rank.capitalize(), + manager.metrics.data['_totals']['%s.%s' % (criteria, rank)])) + return '\n'.join([str(bit) for bit in bits]) + + +def _output_issue_str(issue, indent, show_lineno=True, show_code=True, + lines=-1): + # returns a list of lines that should be added to the existing lines list + bits = [] + bits.append("%s%s>> Issue: [%s:%s] %s" % ( + indent, COLOR[issue.severity], issue.test_id, issue.test, issue.text)) + + bits.append("%s Severity: %s Confidence: %s" % ( + indent, issue.severity.capitalize(), issue.confidence.capitalize())) + + bits.append("%s Location: %s:%s%s" % ( + indent, issue.fname, + issue.lineno if show_lineno else "", + COLOR['DEFAULT'])) + + bits.append("%s More Info: %s" % ( + indent, docs_utils.get_url(issue.test_id))) + + if show_code: + bits.extend([indent + l for l in + issue.get_code(lines, True).split('\n')]) + + return '\n'.join([bit for bit in bits]) + + +def get_results(manager, sev_level, conf_level, lines): + bits = [] + issues = manager.get_issue_list(sev_level, conf_level) + baseline = not isinstance(issues, list) + candidate_indent = ' ' * 10 + + if not 
len(issues): + return u"\tNo issues identified." + + for issue in issues: + # if not a baseline or only one candidate we know the issue + if not baseline or len(issues[issue]) == 1: + bits.append(_output_issue_str(issue, "", lines=lines)) + + # otherwise show the finding and the candidates + else: + bits.append(_output_issue_str(issue, "", + show_lineno=False, + show_code=False)) + + bits.append(u'\n-- Candidate Issues --') + for candidate in issues[issue]: + bits.append(_output_issue_str(candidate, + candidate_indent, + lines=lines)) + bits.append('\n') + bits.append(u'-' * 50) + + return '\n'.join([bit for bit in bits]) + + +def do_print(bits): + # needed so we can mock this stuff + print('\n'.join([bit for bit in bits])) + + +@test_properties.accepts_baseline +def report(manager, fileobj, sev_level, conf_level, lines=-1): + """Prints discovered issues formatted for screen reading + + This makes use of VT100 terminal codes for colored text. + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + """ + + bits = [] + bits.append(header("Run started:%s", datetime.datetime.utcnow())) + + if manager.verbose: + bits.append(get_verbose_details(manager)) + + bits.append(header("\nTest results:")) + bits.append(get_results(manager, sev_level, conf_level, lines)) + bits.append(header("\nCode scanned:")) + bits.append('\tTotal lines of code: %i' % + (manager.metrics.data['_totals']['loc'])) + + bits.append('\tTotal lines skipped (#nosec): %i' % + (manager.metrics.data['_totals']['nosec'])) + + bits.append(get_metrics(manager)) + skipped = manager.get_skipped() + bits.append(header("Files skipped (%i):", len(skipped))) + bits.extend(["\t%s (%s)" % skip for skip in skipped]) + do_print(bits) + + if fileobj.name != sys.stdout.name: + LOG.info("Screen formatter output was not written to file: %s, " + "consider '-f txt'", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/text.py b/env_27/lib/python2.7/site-packages/bandit/formatters/text.py new file mode 100644 index 0000000..bff914e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/text.py @@ -0,0 +1,169 @@ +# Copyright (c) 2015 Hewlett Packard Enterprise +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============== +Text Formatter +============== + +This formatter outputs the issues as plain text. + +:Example: + +.. code-block:: none + + >> Issue: [B301:blacklist_calls] Use of unsafe yaml load. Allows + instantiation of arbitrary objects. Consider yaml.safe_load(). + + Severity: Medium Confidence: High + Location: examples/yaml_load.py:5 + More Info: https://bandit.readthedocs.io/en/latest/ + 4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3}) + 5 y = yaml.load(ystr) + 6 yaml.dump(y) + +.. 
versionadded:: 0.9.0 + +""" + +from __future__ import print_function + +import datetime +import logging +import sys + +from bandit.core import constants +from bandit.core import docs_utils +from bandit.core import test_properties +from bandit.formatters import utils + +LOG = logging.getLogger(__name__) + + +def get_verbose_details(manager): + bits = [] + bits.append(u'Files in scope (%i):' % len(manager.files_list)) + tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})" + bits.extend([tpl % (item, sum(score['SEVERITY']), sum(score['CONFIDENCE'])) + for (item, score) + in zip(manager.files_list, manager.scores)]) + bits.append(u'Files excluded (%i):' % len(manager.excluded_files)) + bits.extend([u"\t%s" % fname for fname in manager.excluded_files]) + return '\n'.join([bit for bit in bits]) + + +def get_metrics(manager): + bits = [] + bits.append("\nRun metrics:") + for (criteria, default) in constants.CRITERIA: + bits.append("\tTotal issues (by %s):" % (criteria.lower())) + for rank in constants.RANKING: + bits.append("\t\t%s: %s" % ( + rank.capitalize(), + manager.metrics.data['_totals']['%s.%s' % (criteria, rank)])) + return '\n'.join([bit for bit in bits]) + + +def _output_issue_str(issue, indent, show_lineno=True, show_code=True, + lines=-1): + # returns a list of lines that should be added to the existing lines list + bits = [] + bits.append("%s>> Issue: [%s:%s] %s" % ( + indent, issue.test_id, issue.test, issue.text)) + + bits.append("%s Severity: %s Confidence: %s" % ( + indent, issue.severity.capitalize(), issue.confidence.capitalize())) + + bits.append("%s Location: %s:%s" % ( + indent, issue.fname, issue.lineno if show_lineno else "")) + + bits.append("%s More Info: %s" % ( + indent, docs_utils.get_url(issue.test_id))) + + if show_code: + bits.extend([indent + l for l in + issue.get_code(lines, True).split('\n')]) + + return '\n'.join([bit for bit in bits]) + + +def get_results(manager, sev_level, conf_level, lines): + bits = [] + issues = manager.get_issue_list(sev_level, conf_level) + baseline = not isinstance(issues, list) + candidate_indent = ' ' * 10 + + if not len(issues): + return u"\tNo issues identified." 
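+
+    # (added note, illustrative) with a baseline, `issues` is a dict mapping
+    # each issue to a list of candidate matches, so the len(issues[issue])
+    # checks below only apply in baseline mode; without a baseline `issues`
+    # is a plain list.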
+ + for issue in issues: + # if not a baseline or only one candidate we know the issue + if not baseline or len(issues[issue]) == 1: + bits.append(_output_issue_str(issue, "", lines=lines)) + + # otherwise show the finding and the candidates + else: + bits.append(_output_issue_str(issue, "", + show_lineno=False, + show_code=False)) + + bits.append(u'\n-- Candidate Issues --') + for candidate in issues[issue]: + bits.append(_output_issue_str(candidate, + candidate_indent, + lines=lines)) + bits.append('\n') + bits.append(u'-' * 50) + return '\n'.join([bit for bit in bits]) + + +@test_properties.accepts_baseline +def report(manager, fileobj, sev_level, conf_level, lines=-1): + """Prints discovered issues in the text format + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + """ + + bits = [] + bits.append("Run started:%s" % datetime.datetime.utcnow()) + + if manager.verbose: + bits.append(get_verbose_details(manager)) + + bits.append("\nTest results:") + bits.append(get_results(manager, sev_level, conf_level, lines)) + bits.append("\nCode scanned:") + bits.append('\tTotal lines of code: %i' % + (manager.metrics.data['_totals']['loc'])) + + bits.append('\tTotal lines skipped (#nosec): %i' % + (manager.metrics.data['_totals']['nosec'])) + + skipped = manager.get_skipped() + bits.append(get_metrics(manager)) + bits.append("Files skipped (%i):" % len(skipped)) + bits.extend(["\t%s (%s)" % skip for skip in skipped]) + result = '\n'.join([bit for bit in bits]) + '\n' + + with fileobj: + wrapped_file = utils.wrap_file_object(fileobj) + wrapped_file.write(utils.convert_file_contents(result)) + + if fileobj.name != sys.stdout.name: + LOG.info("Text output written to file: %s", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/utils.py b/env_27/lib/python2.7/site-packages/bandit/formatters/utils.py new file mode 100644 index 0000000..48400d9 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/utils.py @@ -0,0 +1,43 @@ +# Copyright (c) 2016 Rackspace, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Utility functions for formatting plugins for Bandit.""" + +import io + +import six + + +def wrap_file_object(fileobj): + """Handle differences in Python 2 and 3 around writing bytes.""" + # If it's not an instance of IOBase, we're probably using Python 2 and + # that is less finicky about writing text versus bytes to a file. + if not isinstance(fileobj, io.IOBase): + return fileobj + + # At this point we're using Python 3 and that will mangle text written to + # a file opened in bytes mode. So, let's check if the file can handle + # text as opposed to bytes.
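# An illustrative sketch of the wrapping behaviour described above; the
# BytesIO stand-in and explicit encoding are assumptions for the example,
# not part of this module:
#
#   import io
#   raw = io.BytesIO()                             # a file in bytes mode
#   out = io.TextIOWrapper(raw, encoding='utf-8')  # what this returns
#   out.write(u'report text')                      # text is now accepted
#   out.flush()
#   assert raw.getvalue() == b'report text'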
+ if isinstance(fileobj, io.TextIOBase): + return fileobj + + # Finally, we've determined that the fileobj passed in cannot handle text, + # so we use TextIOWrapper to handle the conversion for us. + return io.TextIOWrapper(fileobj) + + +def convert_file_contents(text): + """Convert text to built-in strings on Python 2.""" + if not six.PY2: + return text + return str(text.encode('utf-8')) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/xml.py b/env_27/lib/python2.7/site-packages/bandit/formatters/xml.py new file mode 100644 index 0000000..ec82ad3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/xml.py @@ -0,0 +1,96 @@ +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============= +XML Formatter +============= + +This formatter outputs the issues as XML. + +:Example: + +.. code-block:: xml + + + Test ID: B301 + Severity: MEDIUM Confidence: HIGH Use of unsafe yaml load. Allows + instantiation of arbitrary objects. Consider yaml.safe_load(). + + Location examples/yaml_load.py:5 + +.. versionadded:: 0.12.0 + +""" +# This future import is necessary here due to the xml import below on Python +# 2.7 +from __future__ import absolute_import + +import logging +import sys +from xml.etree import cElementTree as ET + +import six + +from bandit.core import docs_utils + +LOG = logging.getLogger(__name__) + + +def report(manager, fileobj, sev_level, conf_level, lines=-1): + '''Prints issues in XML format + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + ''' + + issues = manager.get_issue_list(sev_level=sev_level, conf_level=conf_level) + root = ET.Element('testsuite', name='bandit', tests=str(len(issues))) + + for issue in issues: + test = issue.test + testcase = ET.SubElement(root, 'testcase', + classname=issue.fname, name=test) + + text = 'Test ID: %s Severity: %s Confidence: %s\n%s\nLocation %s:%s' + text = text % (issue.test_id, issue.severity, issue.confidence, + issue.text, issue.fname, issue.lineno) + ET.SubElement(testcase, 'error', + more_info=docs_utils.get_url(issue.test_id), + type=issue.severity, + message=issue.text).text = text + + tree = ET.ElementTree(root) + + if fileobj.name == sys.stdout.name: + if six.PY2: + fileobj = sys.stdout + else: + fileobj = sys.stdout.buffer + elif fileobj.mode == 'w': + fileobj.close() + fileobj = open(fileobj.name, "wb") + + with fileobj: + tree.write(fileobj, encoding='utf-8', xml_declaration=True) + + if fileobj.name != sys.stdout.name: + LOG.info("XML output written to file: %s", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/formatters/yaml.py b/env_27/lib/python2.7/site-packages/bandit/formatters/yaml.py new file mode 100644 index 0000000..6b8db50 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/formatters/yaml.py @@ -0,0 +1,131 @@ +# 
Copyright (c) 2017 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============== +YAML Formatter +============== + +This formatter outputs the issues in a yaml format. + +:Example: + +.. code-block:: none + + errors: [] + generated_at: '2017-03-09T22:29:30Z' + metrics: + _totals: + CONFIDENCE.HIGH: 1 + CONFIDENCE.LOW: 0 + CONFIDENCE.MEDIUM: 0 + CONFIDENCE.UNDEFINED: 0 + SEVERITY.HIGH: 0 + SEVERITY.LOW: 0 + SEVERITY.MEDIUM: 1 + SEVERITY.UNDEFINED: 0 + loc: 9 + nosec: 0 + examples/yaml_load.py: + CONFIDENCE.HIGH: 1 + CONFIDENCE.LOW: 0 + CONFIDENCE.MEDIUM: 0 + CONFIDENCE.UNDEFINED: 0 + SEVERITY.HIGH: 0 + SEVERITY.LOW: 0 + SEVERITY.MEDIUM: 1 + SEVERITY.UNDEFINED: 0 + loc: 9 + nosec: 0 + results: + - code: '5 ystr = yaml.dump({''a'' : 1, ''b'' : 2, ''c'' : 3})\n + 6 y = yaml.load(ystr)\n7 yaml.dump(y)\n' + filename: examples/yaml_load.py + issue_confidence: HIGH + issue_severity: MEDIUM + issue_text: Use of unsafe yaml load. Allows instantiation of arbitrary + objects. + Consider yaml.safe_load(). + line_number: 6 + line_range: + - 6 + more_info: https://bandit.readthedocs.io/en/latest/ + test_id: B506 + test_name: yaml_load + +.. versionadded:: 1.5.0 + +""" +# Necessary for this formatter to work when imported on Python 2. Importing +# the standard library's yaml module conflicts with the name of this module. 
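# A sketch of the shadowing problem the comment above describes (the
# behaviour shown is the Python 2 default; PyYAML is assumed installed):
#
#   # without absolute_import, inside bandit/formatters/yaml.py:
#   import yaml                 # implicit relative import -> this module
#
#   # with absolute_import (as below):
#   import yaml                 # absolute import -> the PyYAML package
#   yaml.safe_dump({'a': 1})    # "a: 1\n"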
+from __future__ import absolute_import + +import datetime +import logging +import operator +import sys + +import yaml + +from bandit.core import docs_utils + +LOG = logging.getLogger(__name__) + + +def report(manager, fileobj, sev_level, conf_level, lines=-1): + '''Prints issues in YAML format + + :param manager: the bandit manager object + :param fileobj: The output file object, which may be sys.stdout + :param sev_level: Filtering severity level + :param conf_level: Filtering confidence level + :param lines: Number of lines to report, -1 for all + ''' + + machine_output = {'results': [], 'errors': []} + for (fname, reason) in manager.get_skipped(): + machine_output['errors'].append({'filename': fname, 'reason': reason}) + + results = manager.get_issue_list(sev_level=sev_level, + conf_level=conf_level) + + collector = [r.as_dict() for r in results] + for elem in collector: + elem['more_info'] = docs_utils.get_url(elem['test_id']) + + itemgetter = operator.itemgetter + if manager.agg_type == 'vuln': + machine_output['results'] = sorted(collector, + key=itemgetter('test_name')) + else: + machine_output['results'] = sorted(collector, + key=itemgetter('filename')) + + machine_output['metrics'] = manager.metrics.data + + for result in machine_output['results']: + if 'code' in result: + code = result['code'].replace('\n', '\\n') + result['code'] = code + + # timezone agnostic format + TS_FORMAT = "%Y-%m-%dT%H:%M:%SZ" + + time_string = datetime.datetime.utcnow().strftime(TS_FORMAT) + machine_output['generated_at'] = time_string + + yaml.safe_dump(machine_output, fileobj, default_flow_style=False) + + if fileobj.name != sys.stdout.name: + LOG.info("YAML output written to file: %s", fileobj.name) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/__init__.py b/env_27/lib/python2.7/site-packages/bandit/plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/app_debug.py b/env_27/lib/python2.7/site-packages/bandit/plugins/app_debug.py new file mode 100644 index 0000000..9c6a430 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/app_debug.py @@ -0,0 +1,69 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +====================================================== +B201: Test for use of flask app with debug set to true +====================================================== + +Running Flask applications in debug mode results in the Werkzeug debugger +being enabled. This includes a feature that allows arbitrary code execution. +Documentation for both Flask [1]_ and Werkzeug [2]_ strongly suggests that +debug mode should never be enabled on production systems. + +Operating a production server with debug mode enabled was the probable cause +of the Patreon breach in 2015 [3]_. + +:Example: + +.. 
code-block:: none + + >> Issue: A Flask app appears to be run with debug=True, which exposes + the Werkzeug debugger and allows the execution of arbitrary code. + Severity: High Confidence: Medium + Location: examples/flask_debug.py:10 + 9 #bad + 10 app.run(debug=True) + 11 + +.. seealso:: + + .. [1] http://flask.pocoo.org/docs/0.10/quickstart/#debug-mode + .. [2] http://werkzeug.pocoo.org/docs/0.10/debug/ + .. [3] http://labs.detectify.com/post/130332638391/how-patreon-got-hacked-publicly-exposed-werkzeug # noqa + +.. versionadded:: 0.15.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.test_id('B201') +@test.checks('Call') +def flask_debug_true(context): + if context.is_module_imported_like('flask'): + if context.call_function_name_qual.endswith('.run'): + if context.check_call_arg_value('debug', 'True'): + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.MEDIUM, + text="A Flask app appears to be run with debug=True, " + "which exposes the Werkzeug debugger and allows " + "the execution of arbitrary code.", + lineno=context.get_lineno_for_call_arg('debug'), + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/asserts.py b/env_27/lib/python2.7/site-packages/bandit/plugins/asserts.py new file mode 100644 index 0000000..ce0b0c4 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/asserts.py @@ -0,0 +1,65 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================ +B101: Test for use of assert +============================ + +This plugin test checks for the use of the Python ``assert`` keyword. It was +discovered that some projects used assert to enforce interface constraints. +However, assert is removed with compiling to optimised byte code (python -O +producing \*.pyo files). This caused various protections to be removed. The use +of assert is also considered a general bad practice in OpenStack codebases. + +Please see +https://docs.python.org/2/reference/simple_stmts.html#the-assert-statement for +more info on ``assert`` + +:Example: + +.. code-block:: none + + >> Issue: Use of assert detected. The enclosed code will be removed when + compiling to optimised byte code. + Severity: Low Confidence: High + Location: ./examples/assert.py:1 + 1 assert logged_in + 2 display_assets() + +.. seealso:: + + - https://bugs.launchpad.net/juniperopenstack/+bug/1456193 + - https://bugs.launchpad.net/heat/+bug/1397883 + - https://docs.python.org/2/reference/simple_stmts.html#the-assert-statement + +.. versionadded:: 0.11.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.test_id('B101') +@test.checks('Assert') +def assert_used(context): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text=("Use of assert detected.
The enclosed code " + "will be removed when compiling to optimised byte code.") + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/crypto_request_no_cert_validation.py b/env_27/lib/python2.7/site-packages/bandit/plugins/crypto_request_no_cert_validation.py new file mode 100644 index 0000000..21881b9 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/crypto_request_no_cert_validation.py @@ -0,0 +1,72 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================================= +B501: Test for missing certificate validation +============================================= + +Encryption in general is typically critical to the security of many +applications. Using TLS can greatly increase security by guaranteeing the +identity of the party you are communicating with. This is accomplished by one +or both parties presenting trusted certificates during the connection +initialization phase of TLS. + +When request methods are used certificates are validated automatically which is +the desired behavior. If certificate validation is explicitly turned off +Bandit will return a HIGH severity error. + + +:Example: + +.. code-block:: none + + >> Issue: [request_with_no_cert_validation] Requests call with verify=False + disabling SSL certificate checks, security issue. + Severity: High Confidence: High + Location: examples/requests-ssl-verify-disabled.py:4 + 3 requests.get('https://gmail.com', verify=True) + 4 requests.get('https://gmail.com', verify=False) + 5 requests.post('https://gmail.com', verify=True) + +.. seealso:: + + - https://security.openstack.org/guidelines/dg_move-data-securely.html + - https://security.openstack.org/guidelines/dg_validate-certificates.html + +.. versionadded:: 0.9.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B501') +def request_with_no_cert_validation(context): + http_verbs = ('get', 'options', 'head', 'post', 'put', 'patch', 'delete') + if ('requests' in context.call_function_name_qual and + context.call_function_name in http_verbs): + if context.check_call_arg_value('verify', 'False'): + issue = bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="Requests call with verify=False disabling SSL " + "certificate checks, security issue.", + lineno=context.get_lineno_for_call_arg('verify'), + ) + return issue diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/django_sql_injection.py b/env_27/lib/python2.7/site-packages/bandit/plugins/django_sql_injection.py new file mode 100644 index 0000000..1da2f81 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/django_sql_injection.py @@ -0,0 +1,117 @@ +# -*- coding:utf-8 -*- +# +# Copyright (C) 2018 [Victor Torre](https://github.com/ehooo) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import ast + +import bandit +from bandit.core import test_properties as test + + +def keywords2dict(keywords): + kwargs = {} + for node in keywords: + if isinstance(node, ast.keyword): + kwargs[node.arg] = node.value + return kwargs + + +@test.checks('Call') +@test.test_id('B610') +def django_extra_used(context): + """**B610: Potential SQL injection on extra function** + + .. seealso:: + + - https://docs.djangoproject.com/en/dev/topics/ + security/#sql-injection-protection + + .. versionadded:: 1.5.0 + + """ + description = "Use of extra potential SQL attack vector." + if context.call_function_name == 'extra': + kwargs = keywords2dict(context.node.keywords) + args = context.node.args + if args: + if len(args) >= 1: + kwargs['select'] = args[0] + if len(args) >= 2: + kwargs['where'] = args[1] + if len(args) >= 3: + kwargs['params'] = args[2] + if len(args) >= 4: + kwargs['tables'] = args[3] + if len(args) >= 5: + kwargs['order_by'] = args[4] + if len(args) >= 6: + kwargs['select_params'] = args[5] + insecure = False + for key in ['where', 'tables']: + if key in kwargs: + if isinstance(kwargs[key], ast.List): + for val in kwargs[key].elts: + if not isinstance(val, ast.Str): + insecure = True + break + else: + insecure = True + break + if not insecure and 'select' in kwargs: + if isinstance(kwargs['select'], ast.Dict): + for k in kwargs['select'].keys: + if not isinstance(k, ast.Str): + insecure = True + break + if not insecure: + for v in kwargs['select'].values: + if not isinstance(v, ast.Str): + insecure = True + break + else: + insecure = True + + if insecure: + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text=description + ) + + +@test.checks('Call') +@test.test_id('B611') +def django_rawsql_used(context): + """**B611: Potential SQL injection on RawSQL function** + + .. seealso:: + + - https://docs.djangoproject.com/en/dev/topics/ + security/#sql-injection-protection + + .. versionadded:: 1.5.0 + + """ + description = "Use of RawSQL potential SQL attack vector." + if context.is_module_imported_like('django.db.models'): + if context.call_function_name == 'RawSQL': + sql = context.node.args[0] + if not isinstance(sql, ast.Str): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text=description + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/django_xss.py b/env_27/lib/python2.7/site-packages/bandit/plugins/django_xss.py new file mode 100644 index 0000000..1a86a37 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/django_xss.py @@ -0,0 +1,295 @@ +# +# Copyright 2018 Victor Torre +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +import ast + +import six + +import bandit +from bandit.core import test_properties as test + + +class DeepAssignation(object): + def __init__(self, var_name, ignore_nodes=None): + self.var_name = var_name + self.ignore_nodes = ignore_nodes + + def is_assigned_in(self, items): + assigned = [] + for ast_inst in items: + new_assigned = self.is_assigned(ast_inst) + if new_assigned: + if isinstance(new_assigned, (list, tuple)): + assigned.extend(new_assigned) + else: + assigned.append(new_assigned) + return assigned + + def is_assigned(self, node): + assigned = False + if self.ignore_nodes: + if isinstance(self.ignore_nodes, (list, tuple, object)): + if isinstance(node, self.ignore_nodes): + return assigned + + if isinstance(node, ast.Expr): + assigned = self.is_assigned(node.value) + elif isinstance(node, ast.FunctionDef): + for name in node.args.args: + if isinstance(name, ast.Name): + if name.id == self.var_name.id: + # If is param the assignations are not affected + return assigned + assigned = self.is_assigned_in(node.body) + elif isinstance(node, ast.With): + if six.PY2: + if node.optional_vars.id == self.var_name.id: + assigned = node + else: + assigned = self.is_assigned_in(node.body) + else: + for withitem in node.items: + if withitem.optional_vars.id == self.var_name.id: + assigned = node + else: + assigned = self.is_assigned_in(node.body) + elif six.PY2 and isinstance(node, ast.TryFinally): + assigned = [] + assigned.extend(self.is_assigned_in(node.body)) + assigned.extend(self.is_assigned_in(node.finalbody)) + elif six.PY2 and isinstance(node, ast.TryExcept): + assigned = [] + assigned.extend(self.is_assigned_in(node.body)) + assigned.extend(self.is_assigned_in(node.handlers)) + assigned.extend(self.is_assigned_in(node.orelse)) + elif not six.PY2 and isinstance(node, ast.Try): + assigned = [] + assigned.extend(self.is_assigned_in(node.body)) + assigned.extend(self.is_assigned_in(node.handlers)) + assigned.extend(self.is_assigned_in(node.orelse)) + assigned.extend(self.is_assigned_in(node.finalbody)) + elif isinstance(node, ast.ExceptHandler): + assigned = [] + assigned.extend(self.is_assigned_in(node.body)) + elif isinstance(node, (ast.If, ast.For, ast.While)): + assigned = [] + assigned.extend(self.is_assigned_in(node.body)) + assigned.extend(self.is_assigned_in(node.orelse)) + elif isinstance(node, ast.AugAssign): + if isinstance(node.target, ast.Name): + if node.target.id == self.var_name.id: + assigned = node.value + elif isinstance(node, ast.Assign) and node.targets: + target = node.targets[0] + if isinstance(target, ast.Name): + if target.id == self.var_name.id: + assigned = node.value + elif isinstance(target, ast.Tuple): + pos = 0 + for name in target.elts: + if name.id == self.var_name.id: + assigned = node.value.elts[pos] + break + pos += 1 + return assigned + + +def evaluate_var(xss_var, parent, until, ignore_nodes=None): + secure = False + if isinstance(xss_var, ast.Name): + if isinstance(parent, ast.FunctionDef): + for name in parent.args.args: + arg_name = name.id if six.PY2 else name.arg + if arg_name == xss_var.id: + return False # Params are not secure + + analyser = DeepAssignation(xss_var, ignore_nodes) + for node in parent.body: + if node.lineno >= until: + break + to = analyser.is_assigned(node) + if to: + if isinstance(to, ast.Str): + secure = True + elif isinstance(to, ast.Name): + secure = evaluate_var(to, parent, + to.lineno, ignore_nodes) + elif 
isinstance(to, ast.Call): + secure = evaluate_call(to, parent, ignore_nodes) + elif isinstance(to, (list, tuple)): + num_secure = 0 + for some_to in to: + if isinstance(some_to, ast.Str): + num_secure += 1 + elif isinstance(some_to, ast.Name): + if evaluate_var(some_to, parent, + node.lineno, ignore_nodes): + num_secure += 1 + else: + break + else: + break + if num_secure == len(to): + secure = True + else: + secure = False + break + else: + secure = False + break + return secure + + +def evaluate_call(call, parent, ignore_nodes=None): + secure = False + evaluate = False + if isinstance(call, ast.Call) and isinstance(call.func, ast.Attribute): + if isinstance(call.func.value, ast.Str) and call.func.attr == 'format': + evaluate = True + if call.keywords or (six.PY2 and call.kwargs): + evaluate = False # TODO(??) get support for this + + if evaluate: + args = list(call.args) + if six.PY2 and call.starargs and isinstance(call.starargs, + (ast.List, ast.Tuple)): + args.extend(call.starargs.elts) + + num_secure = 0 + for arg in args: + if isinstance(arg, ast.Str): + num_secure += 1 + elif isinstance(arg, ast.Name): + if evaluate_var(arg, parent, call.lineno, ignore_nodes): + num_secure += 1 + else: + break + elif isinstance(arg, ast.Call): + if evaluate_call(arg, parent, ignore_nodes): + num_secure += 1 + else: + break + elif not six.PY2 and isinstance(arg, ast.Starred) and isinstance( + arg.value, (ast.List, ast.Tuple)): + args.extend(arg.value.elts) + num_secure += 1 + else: + break + secure = num_secure == len(args) + + return secure + + +def transform2call(var): + if isinstance(var, ast.BinOp): + is_mod = isinstance(var.op, ast.Mod) + is_left_str = isinstance(var.left, ast.Str) + if is_mod and is_left_str: + new_call = ast.Call() + new_call.args = [] + if six.PY2: + new_call.starargs = None + new_call.keywords = None + if six.PY2: + new_call.kwargs = None + new_call.lineno = var.lineno + new_call.func = ast.Attribute() + new_call.func.value = var.left + new_call.func.attr = 'format' + if isinstance(var.right, ast.Tuple): + new_call.args = var.right.elts + elif six.PY2 and isinstance(var.right, ast.Dict): + new_call.kwargs = var.right + else: + new_call.args = [var.right] + return new_call + + +def check_risk(node): + description = "Potential XSS on mark_safe function."
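# Illustrative inputs for the evaluation below (a hypothetical snippet,
# not taken from this file; `name` stands for any variable):
#
#   mark_safe("<b>static</b>")            # Str literal: never reaches here
#   mark_safe("<b>%s</b>" % name)         # BinOp: rewritten through
#                                         # transform2call() above, flagged
#                                         # unless `name` is provably safe
#   mark_safe("a{}".format(name))         # Call: checked by evaluate_call()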
+ xss_var = node.args[0] + + secure = False + + if isinstance(xss_var, ast.Name): + # Check whether the variable is secure + parent = node.parent + while not isinstance(parent, (ast.Module, ast.FunctionDef)): + parent = parent.parent + + is_param = False + if isinstance(parent, ast.FunctionDef): + for name in parent.args.args: + arg_name = name.id if six.PY2 else name.arg + if arg_name == xss_var.id: + is_param = True + break + + if not is_param: + secure = evaluate_var(xss_var, parent, node.lineno) + elif isinstance(xss_var, ast.Call): + parent = node.parent + while not isinstance(parent, (ast.Module, ast.FunctionDef)): + parent = parent.parent + secure = evaluate_call(xss_var, parent) + elif isinstance(xss_var, ast.BinOp): + is_mod = isinstance(xss_var.op, ast.Mod) + is_left_str = isinstance(xss_var.left, ast.Str) + if is_mod and is_left_str: + parent = node.parent + while not isinstance(parent, (ast.Module, ast.FunctionDef)): + parent = parent.parent + new_call = transform2call(xss_var) + secure = evaluate_call(new_call, parent) + + if not secure: + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.HIGH, + text=description + ) + + +@test.checks('Call') +@test.test_id('B703') +def django_mark_safe(context): + """**B703: Potential XSS on mark_safe function** + + .. seealso:: + + - https://docs.djangoproject.com/en/dev/topics/ + security/#cross-site-scripting-xss-protection + - https://docs.djangoproject.com/en/dev/ + ref/utils/#module-django.utils.safestring + - https://docs.djangoproject.com/en/dev/ + ref/utils/#django.utils.html.format_html + + .. versionadded:: 1.5.0 + + """ + if context.is_module_imported_like('django.utils.safestring'): + affected_functions = [ + 'mark_safe', + 'SafeText', + 'SafeUnicode', + 'SafeString', + 'SafeBytes' + ] + if context.call_function_name in affected_functions: + xss = context.node.args[0] + if not isinstance(xss, ast.Str): + return check_risk(context.node) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/exec.py b/env_27/lib/python2.7/site-packages/bandit/plugins/exec.py new file mode 100644 index 0000000..92c5e4b --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/exec.py @@ -0,0 +1,68 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================== +B102: Test for the use of exec +============================== + +This plugin test checks for the use of Python's `exec` method or keyword. The +Python docs succinctly describe why the use of `exec` is risky. + +:Example: + +.. code-block:: none + + >> Issue: Use of exec detected. + Severity: Medium Confidence: High + Location: ./examples/exec-py2.py:2 + 1 exec("do evil") + 2 exec "do evil" + +.. seealso:: + + - https://docs.python.org/2.0/ref/exec.html + - https://www.python.org/dev/peps/pep-0551/#background + - https://www.python.org/dev/peps/pep-0578/#suggested-audit-hook-locations + +..
versionadded:: 0.9.0 +""" + +import six + +import bandit +from bandit.core import test_properties as test + + +def exec_issue(): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.HIGH, + text="Use of exec detected." + ) + + +if six.PY2: + @test.checks('Exec') + @test.test_id('B102') + def exec_used(context): + return exec_issue() +else: + @test.checks('Call') + @test.test_id('B102') + def exec_used(context): + if context.call_function_name_qual == 'exec': + return exec_issue() diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/general_bad_file_permissions.py b/env_27/lib/python2.7/site-packages/bandit/plugins/general_bad_file_permissions.py new file mode 100644 index 0000000..6b6a002 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/general_bad_file_permissions.py @@ -0,0 +1,89 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +================================================== +B103: Test for setting permissive file permissions +================================================== + +POSIX-based operating systems utilize a permissions model to protect access to +parts of the file system. This model supports three roles, "owner", "group" +and "world"; each role may have a combination of "read", "write" or "execute" +flags set. Python provides ``chmod`` to manipulate POSIX style permissions. + +This plugin test looks for the use of ``chmod`` and will alert when it is used +to set particularly permissive control flags. A MEDIUM warning is generated if +a file is set to group executable and a HIGH warning is reported if a file is +set world writable. Warnings are given with HIGH confidence. + +:Example: + +.. code-block:: none + + >> Issue: Probable insecure usage of temp file/directory. + Severity: Medium Confidence: Medium + Location: ./examples/os-chmod-py2.py:15 + 14 os.chmod('/etc/hosts', 0o777) + 15 os.chmod('/tmp/oh_hai', 0x1ff) + 16 os.chmod('/etc/passwd', stat.S_IRWXU) + + >> Issue: Chmod setting a permissive mask 0777 on file (key_file). + Severity: High Confidence: High + Location: ./examples/os-chmod-py2.py:17 + 16 os.chmod('/etc/passwd', stat.S_IRWXU) + 17 os.chmod(key_file, 0o777) + 18 + +.. seealso:: + + - https://security.openstack.org/guidelines/dg_apply-restrictive-file-permissions.html # noqa + - https://en.wikipedia.org/wiki/File_system_permissions + - https://security.openstack.org + +..
versionadded:: 0.9.0 + +""" + +import stat + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B103') +def set_bad_file_permissions(context): + if 'chmod' in context.call_function_name: + if context.call_args_count == 2: + mode = context.get_call_arg_at_position(1) + + if (mode is not None and isinstance(mode, int) and + (mode & stat.S_IWOTH or mode & stat.S_IXGRP)): + # world writable is an HIGH, group executable is a MEDIUM + if mode & stat.S_IWOTH: + sev_level = bandit.HIGH + else: + sev_level = bandit.MEDIUM + + filename = context.get_call_arg_at_position(0) + if filename is None: + filename = 'NOT PARSED' + return bandit.Issue( + severity=sev_level, + confidence=bandit.HIGH, + text="Chmod setting a permissive mask %s on file (%s)." % + (oct(mode), filename) + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/general_bind_all_interfaces.py b/env_27/lib/python2.7/site-packages/bandit/plugins/general_bind_all_interfaces.py new file mode 100644 index 0000000..37a1e06 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/general_bind_all_interfaces.py @@ -0,0 +1,58 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +======================================== +B104: Test for binding to all interfaces +======================================== + +Binding to all network interfaces can potentially open up a service to traffic +on unintended interfaces, that may not be properly documented or secured. This +plugin test looks for a string pattern "0.0.0.0" that may indicate a hardcoded +binding to all network interfaces. + +:Example: + +.. code-block:: none + + >> Issue: Possible binding to all interfaces. + Severity: Medium Confidence: Medium + Location: ./examples/binding.py:4 + 3 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + 4 s.bind(('0.0.0.0', 31137)) + 5 s.bind(('192.168.0.1', 8080)) + +.. seealso:: + + - https://nvd.nist.gov/vuln/detail/CVE-2018-1281 + +.. versionadded:: 0.9.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Str') +@test.test_id('B104') +def hardcoded_bind_all_interfaces(context): + if context.string_val == '0.0.0.0': + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text="Possible binding to all interfaces." + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_password.py b/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_password.py new file mode 100644 index 0000000..e9f7c3a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_password.py @@ -0,0 +1,215 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +import sys + +import bandit +from bandit.core import test_properties as test + + +CANDIDATES = set(["password", "pass", "passwd", "pwd", "secret", "token", + "secrete"]) + + +def _report(value): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.MEDIUM, + text=("Possible hardcoded password: '%s'" % value)) + + +@test.checks('Str') +@test.test_id('B105') +def hardcoded_password_string(context): + """**B105: Test for use of hard-coded password strings** + + The use of hard-coded passwords increases the possibility of password + guessing tremendously. This plugin test looks for all string literals and + checks the following conditions: + + - assigned to a variable that looks like a password + - assigned to a dict key that looks like a password + - used in a comparison with a variable that looks like a password + + Variables are considered to look like a password if they match any one + of: + + - "password" + - "pass" + - "passwd" + - "pwd" + - "secret" + - "token" + - "secrete" + + Note: this can be noisy and may generate false positives. + + **Config Options:** + + None + + :Example: + + .. code-block:: none + + >> Issue: Possible hardcoded password '(root)' + Severity: Low Confidence: Low + Location: ./examples/hardcoded-passwords.py:5 + 4 def someFunction2(password): + 5 if password == "root": + 6 print("OK, logged in") + + .. seealso:: + + - https://www.owasp.org/index.php/Use_of_hard-coded_password + + .. versionadded:: 0.9.0 + + """ + node = context.node + if isinstance(node.parent, ast.Assign): + # looks for "candidate='some_string'" + for targ in node.parent.targets: + if isinstance(targ, ast.Name) and targ.id in CANDIDATES: + return _report(node.s) + + elif isinstance(node.parent, ast.Index) and node.s in CANDIDATES: + # looks for "dict[candidate]='some_string'" + # assign -> subscript -> index -> string + assign = node.parent.parent.parent + if isinstance(assign, ast.Assign) and isinstance(assign.value, + ast.Str): + return _report(assign.value.s) + + elif isinstance(node.parent, ast.Compare): + # looks for "candidate == 'some_string'" + comp = node.parent + if isinstance(comp.left, ast.Name) and comp.left.id in CANDIDATES: + if isinstance(comp.comparators[0], ast.Str): + return _report(comp.comparators[0].s) + + +@test.checks('Call') +@test.test_id('B106') +def hardcoded_password_funcarg(context): + """**B106: Test for use of hard-coded password function arguments** + + The use of hard-coded passwords increases the possibility of password + guessing tremendously. This plugin test looks for all function calls being + passed a keyword argument that is a string literal. It checks that the + assigned local variable does not look like a password. + + Variables are considered to look like a password if they match any one + of: + + - "password" + - "pass" + - "passwd" + - "pwd" + - "secret" + - "token" + - "secrete" + + Note: this can be noisy and may generate false positives. + + **Config Options:** + + None + + :Example: + + ..
code-block:: none + + >> Issue: [B106:hardcoded_password_funcarg] Possible hardcoded + password: 'blerg' + Severity: Low Confidence: Medium + Location: ./examples/hardcoded-passwords.py:16 + 15 + 16 doLogin(password="blerg") + + .. seealso:: + + - https://www.owasp.org/index.php/Use_of_hard-coded_password + + .. versionadded:: 0.9.0 + + """ + # looks for "function(candidate='some_string')" + for kw in context.node.keywords: + if isinstance(kw.value, ast.Str) and kw.arg in CANDIDATES: + return _report(kw.value.s) + + +@test.checks('FunctionDef') +@test.test_id('B107') +def hardcoded_password_default(context): + """**B107: Test for use of hard-coded password argument defaults** + + The use of hard-coded passwords increases the possibility of password + guessing tremendously. This plugin test looks for all function definitions + that specify a default string literal for some argument. It checks that + the argument does not look like a password. + + Variables are considered to look like a password if they match any one + of: + + - "password" + - "pass" + - "passwd" + - "pwd" + - "secret" + - "token" + - "secrete" + + Note: this can be noisy and may generate false positives. + + **Config Options:** + + None + + :Example: + + .. code-block:: none + + >> Issue: [B107:hardcoded_password_default] Possible hardcoded + password: 'Admin' + Severity: Low Confidence: Medium + Location: ./examples/hardcoded-passwords.py:1 + + 1 def someFunction(user, password="Admin"): + 2 print("Hi " + user) + + .. seealso:: + + - https://www.owasp.org/index.php/Use_of_hard-coded_password + + .. versionadded:: 0.9.0 + + """ + # looks for "def function(candidate='some_string')" + + # this pads the list of default values with "None" if nothing is given + defs = [None] * (len(context.node.args.args) - + len(context.node.args.defaults)) + defs.extend(context.node.args.defaults) + + # go through all (param, value)s and look for candidates + for key, val in zip(context.node.args.args, defs): + if isinstance(key, ast.Name) or isinstance(key, ast.arg): + check = key.arg if sys.version_info.major > 2 else key.id # Py3 + if isinstance(val, ast.Str) and check in CANDIDATES: + return _report(val.s) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_tmp.py b/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_tmp.py new file mode 100644 index 0000000..49a2304 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/general_hardcoded_tmp.py @@ -0,0 +1,86 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +=================================================== +B108: Test for insecure usage of tmp file/directory +=================================================== + +Safely creating a temporary file or directory means following a number of rules +(see the references for more details).
This plugin test looks for strings +starting with (configurable) commonly used temporary paths, for example: + + - /tmp + - /var/tmp + - /dev/shm + - etc + +**Config Options:** + +This test plugin takes a similarly named config block, +`hardcoded_tmp_directory`. The config block provides a Python list, `tmp_dirs`, +that lists string fragments indicating possible temporary file paths. Any +string starting with one of these fragments will report a MEDIUM confidence +issue. + +.. code-block:: yaml + + hardcoded_tmp_directory: + tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm'] + + +:Example: + +.. code-block:: none + + >> Issue: Probable insecure usage of temp file/directory. + Severity: Medium Confidence: Medium + Location: ./examples/hardcoded-tmp.py:1 + 1 f = open('/tmp/abc', 'w') + 2 f.write('def') + +.. seealso:: + + - https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html # noqa + +.. versionadded:: 0.9.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +def gen_config(name): + if name == 'hardcoded_tmp_directory': + return {'tmp_dirs': ['/tmp', '/var/tmp', '/dev/shm']} + + +@test.takes_config +@test.checks('Str') +@test.test_id('B108') +def hardcoded_tmp_directory(context, config): + if config is not None and 'tmp_dirs' in config: + tmp_dirs = config['tmp_dirs'] + else: + tmp_dirs = ['/tmp', '/var/tmp', '/dev/shm'] + + if any(context.string_val.startswith(s) for s in tmp_dirs): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text="Probable insecure usage of temp file/directory." + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/hashlib_new_insecure_functions.py b/env_27/lib/python2.7/site-packages/bandit/plugins/hashlib_new_insecure_functions.py new file mode 100644 index 0000000..46436c2 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/hashlib_new_insecure_functions.py @@ -0,0 +1,63 @@ +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +========================================================================== +B324: Test for use of insecure md4 and md5 hash functions in hashlib.new() +========================================================================== + +This plugin checks for the usage of the insecure MD4 and MD5 hash functions +in the ``hashlib.new`` function. The ``hashlib.new`` function provides the ability +to construct a new hashing object using the named algorithm. This can be used +to create insecure hash functions like MD4 and MD5 if they are passed as +algorithm names to this function. + +This is similar to the B303 blacklist check, except that this checks for insecure +hash functions created using the ``hashlib.new`` function. + +:Example: + + >> Issue: [B324:hashlib_new] Use of insecure MD4 or MD5 hash function. + Severity: Medium Confidence: High + Location: examples/hashlib_new_insecure_funcs.py:3 + 2 + 3 md5_hash = hashlib.new('md5', string='test') + 4 print(md5_hash) + + +..
versionadded:: 1.5.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.test_id('B324') +@test.checks('Call') +def hashlib_new(context): + if isinstance(context.call_function_name_qual, str): + qualname_list = context.call_function_name_qual.split('.') + func = qualname_list[-1] + if 'hashlib' in qualname_list and func == 'new': + args = context.call_args + keywords = context.call_keywords + name = args[0] if args else keywords['name'] + if name.lower() in ('md4', 'md5'): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.HIGH, + text="Use of insecure MD4 or MD5 hash function.", + lineno=context.node.lineno, + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/injection_paramiko.py b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_paramiko.py new file mode 100644 index 0000000..a87e13b --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_paramiko.py @@ -0,0 +1,74 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================================== +B601: Test for shell injection within Paramiko +============================================== + +Paramiko is a Python library designed to work with the SSH2 protocol for secure +(encrypted and authenticated) connections to remote machines. It is intended to +run commands on a remote host. These commands are run within a shell on the +target and are thus vulnerable to various shell injection attacks. Bandit +reports a MEDIUM issue when it detects the use of Paramiko's "exec_command" or +"invoke_shell" methods advising the user to check inputs are correctly +sanitized. + +:Example: + +.. code-block:: none + + >> Issue: Possible shell injection via Paramiko call, check inputs are + properly sanitized. + Severity: Medium Confidence: Medium + Location: ./examples/paramiko_injection.py:4 + 3 # this is not safe + 4 paramiko.exec_command('something; reallly; unsafe') + 5 + + >> Issue: Possible shell injection via Paramiko call, check inputs are + properly sanitized. + Severity: Medium Confidence: Medium + Location: ./examples/paramiko_injection.py:10 + 9 # this is not safe + 10 SSHClient.invoke_shell('something; bad; here\n') + 11 + +.. seealso:: + + - https://security.openstack.org + - https://github.com/paramiko/paramiko + - https://www.owasp.org/index.php/Command_Injection + +.. 
versionadded:: 0.12.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B601') +def paramiko_calls(context): + issue_text = ('Possible shell injection via Paramiko call, check inputs ' + 'are properly sanitized.') + for module in ['paramiko']: + if context.is_module_imported_like(module): + if context.call_function_name in ['exec_command', 'invoke_shell']: + return bandit.Issue(severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text=issue_text) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/injection_shell.py b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_shell.py new file mode 100644 index 0000000..6b73297 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_shell.py @@ -0,0 +1,657 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +import re + +import six + +import bandit +from bandit.core import test_properties as test + +# yuck, regex: starts with a windows drive letter (e.g. C:) +# or one of our path delimiter characters (/, \, .) +full_path_match = re.compile(r'^(?:[A-Za-z](?=\:)|[\\\/\.])') + + +def _evaluate_shell_call(context): + no_formatting = isinstance(context.node.args[0], ast.Str) + + if no_formatting: + return bandit.LOW + else: + return bandit.HIGH + + +def gen_config(name): + if name == 'shell_injection': + return { + # Start a process using the subprocess module, or one of its + # wrappers. + 'subprocess': + ['subprocess.Popen', + 'subprocess.call', + 'subprocess.check_call', + 'subprocess.check_output', + 'subprocess.run'], + + # Start a process with a function vulnerable to shell injection. + 'shell': + ['os.system', + 'os.popen', + 'os.popen2', + 'os.popen3', + 'os.popen4', + 'popen2.popen2', + 'popen2.popen3', + 'popen2.popen4', + 'popen2.Popen3', + 'popen2.Popen4', + 'commands.getoutput', + 'commands.getstatusoutput'], + + # Start a process with a function that is not vulnerable to shell + # injection.
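# A hedged illustration of the three buckets (example calls only; `d`
# stands for any untrusted value):
#
#   subprocess.call('ls %s' % d, shell=True)   # 'subprocess' + shell=True
#   os.system('ls ' + d)                       # 'shell': metachars in d run
#   os.execvp('ls', ['ls', d])                 # argv only, no shell parsing
#
# Functions of that last kind make up the 'no_shell' list below: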
+ 'no_shell': + ['os.execl', + 'os.execle', + 'os.execlp', + 'os.execlpe', + 'os.execv', + 'os.execve', + 'os.execvp', + 'os.execvpe', + 'os.spawnl', + 'os.spawnle', + 'os.spawnlp', + 'os.spawnlpe', + 'os.spawnv', + 'os.spawnve', + 'os.spawnvp', + 'os.spawnvpe', + 'os.startfile'] + } + + +def has_shell(context): + keywords = context.node.keywords + result = False + if 'shell' in context.call_keywords: + for key in keywords: + if key.arg == 'shell': + val = key.value + if isinstance(val, ast.Num): + result = bool(val.n) + elif isinstance(val, ast.List): + result = bool(val.elts) + elif isinstance(val, ast.Dict): + result = bool(val.keys) + elif isinstance(val, ast.Name) and val.id in ['False', 'None']: + result = False + elif not six.PY2 and isinstance(val, ast.NameConstant): + result = val.value + else: + result = True + return result + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B602') +def subprocess_popen_with_shell_equals_true(context, config): + """**B602: Test for use of popen with shell equals true** + + Python possesses many mechanisms to invoke an external executable. However, + doing so may present a security issue if appropriate care is not taken to + sanitize any user provided or variable input. + + This plugin test is part of a family of tests built to check for process + spawning and warn appropriately. Specifically, this test looks for the + spawning of a subprocess using a command shell. This type of subprocess + invocation is dangerous as it is vulnerable to various shell injection + attacks. Great care should be taken to sanitize all input in order to + mitigate this risk. Calls of this type are identified by a parameter of + 'shell=True' being given. + + Additionally, this plugin scans the command string given and adjusts its + reported severity based on how it is presented. If the command string is a + simple static string containing no special shell characters, then the + resulting issue has low severity. If the string is static, but contains + shell formatting characters or wildcards, then the reported issue is + medium. Finally, if the string is computed using Python's string + manipulation or formatting operations, then the reported issue has high + severity. These severity levels reflect the likelihood that the code is + vulnerable to injection. + + See also: + + - :doc:`../plugins/linux_commands_wildcard_injection` + - :doc:`../plugins/subprocess_without_shell_equals_true` + - :doc:`../plugins/start_process_with_no_shell` + - :doc:`../plugins/start_process_with_a_shell` + - :doc:`../plugins/start_process_with_partial_path` + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`. They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + This plugin specifically scans for methods listed in `subprocess` section + that have shell=True specified. + + .. code-block:: yaml + + shell_injection: + + # Start a process using the subprocess module, or one of its + wrappers. + subprocess: + - subprocess.Popen + - subprocess.call + + + :Example: + + .. 
code-block:: none + + >> Issue: subprocess call with shell=True seems safe, but may be + changed in the future, consider rewriting without shell + Severity: Low Confidence: High + Location: ./examples/subprocess_shell.py:21 + 20 subprocess.check_call(['/bin/ls', '-l'], shell=False) + 21 subprocess.check_call('/bin/ls -l', shell=True) + 22 + + >> Issue: call with shell=True contains special shell characters, + consider moving extra logic into Python code + Severity: Medium Confidence: High + Location: ./examples/subprocess_shell.py:26 + 25 + 26 subprocess.Popen('/bin/ls *', shell=True) + 27 subprocess.Popen('/bin/ls %s' % ('something',), shell=True) + + >> Issue: subprocess call with shell=True identified, security issue. + Severity: High Confidence: High + Location: ./examples/subprocess_shell.py:27 + 26 subprocess.Popen('/bin/ls *', shell=True) + 27 subprocess.Popen('/bin/ls %s' % ('something',), shell=True) + 28 subprocess.Popen('/bin/ls {}'.format('something'), shell=True) + + .. seealso:: + + - https://security.openstack.org + - https://docs.python.org/2/library/subprocess.html#frequently-used-arguments # noqa + - https://security.openstack.org/guidelines/dg_use-subprocess-securely.html + - https://security.openstack.org/guidelines/dg_avoid-shell-true.html + + .. versionadded:: 0.9.0 + """ + if config and context.call_function_name_qual in config['subprocess']: + if has_shell(context): + if len(context.call_args) > 0: + sev = _evaluate_shell_call(context) + if sev == bandit.LOW: + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text='subprocess call with shell=True seems safe, but ' + 'may be changed in the future, consider ' + 'rewriting without shell', + lineno=context.get_lineno_for_call_arg('shell'), + ) + else: + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text='subprocess call with shell=True identified, ' + 'security issue.', + lineno=context.get_lineno_for_call_arg('shell'), + ) + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B603') +def subprocess_without_shell_equals_true(context, config): + """**B603: Test for use of subprocess without shell equals true** + + Python possesses many mechanisms to invoke an external executable. However, + doing so may present a security issue if appropriate care is not taken to + sanitize any user provided or variable input. + + This plugin test is part of a family of tests built to check for process + spawning and warn appropriately. Specifically, this test looks for the + spawning of a subprocess without the use of a command shell. This type of + subprocess invocation is not vulnerable to shell injection attacks, but + care should still be taken to ensure validity of input. + + Because this is a lesser issue than that described in + `subprocess_popen_with_shell_equals_true`, a LOW severity warning is + reported. + + See also: + + - :doc:`../plugins/linux_commands_wildcard_injection` + - :doc:`../plugins/subprocess_popen_with_shell_equals_true` + - :doc:`../plugins/start_process_with_no_shell` + - :doc:`../plugins/start_process_with_a_shell` + - :doc:`../plugins/start_process_with_partial_path` + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`.
They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + This plugin specifically scans for methods listed in `subprocess` section + that have shell=False specified. + + .. code-block:: yaml + + shell_injection: + # Start a process using the subprocess module, or one of its + wrappers. + subprocess: + - subprocess.Popen + - subprocess.call + + :Example: + + .. code-block:: none + + >> Issue: subprocess call - check for execution of untrusted input. + Severity: Low Confidence: High + Location: ./examples/subprocess_shell.py:23 + 22 + 23 subprocess.check_output(['/bin/ls', '-l']) + 24 + + .. seealso:: + + - https://security.openstack.org + - https://docs.python.org/2/library/subprocess.html#frequently-used-arguments # noqa + - https://security.openstack.org/guidelines/dg_avoid-shell-true.html + - https://security.openstack.org/guidelines/dg_use-subprocess-securely.html + + .. versionadded:: 0.9.0 + """ + if config and context.call_function_name_qual in config['subprocess']: + if not has_shell(context): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text='subprocess call - check for execution of untrusted ' + 'input.', + lineno=context.get_lineno_for_call_arg('shell'), + ) + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B604') +def any_other_function_with_shell_equals_true(context, config): + """**B604: Test for any function with shell equals true** + + Python possesses many mechanisms to invoke an external executable. However, + doing so may present a security issue if appropriate care is not taken to + sanitize any user provided or variable input. + + This plugin test is part of a family of tests built to check for process + spawning and warn appropriately. Specifically, this plugin test + interrogates method calls for the presence of a keyword parameter `shell` + equalling true. It is related to detection of shell injection issues and is + intended to catch custom wrappers to vulnerable methods that may have been + created. + + See also: + + - :doc:`../plugins/linux_commands_wildcard_injection` + - :doc:`../plugins/subprocess_popen_with_shell_equals_true` + - :doc:`../plugins/subprocess_without_shell_equals_true` + - :doc:`../plugins/start_process_with_no_shell` + - :doc:`../plugins/start_process_with_a_shell` + - :doc:`../plugins/start_process_with_partial_path` + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`. They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + Specifically, this plugin excludes those functions listed under the + subprocess section, these methods are tested in a separate specific test + plugin and this exclusion prevents duplicate issue reporting. + + .. code-block:: yaml + + shell_injection: + # Start a process using the subprocess module, or one of its + wrappers. + subprocess: [subprocess.Popen, subprocess.call, + subprocess.check_call, subprocess.check_output + execute_with_timeout] + + + :Example: + + .. code-block:: none + + >> Issue: Function call with shell=True parameter identified, possible + security issue. 
+ Severity: Medium Confidence: High + Location: ./examples/subprocess_shell.py:9 + 8 pop('/bin/gcc --version', shell=True) + 9 Popen('/bin/gcc --version', shell=True) + 10 + + .. seealso:: + + - https://security.openstack.org/guidelines/dg_avoid-shell-true.html + - https://security.openstack.org/guidelines/dg_use-subprocess-securely.html # noqa + + .. versionadded:: 0.9.0 + """ + if config and context.call_function_name_qual not in config['subprocess']: + if has_shell(context): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.LOW, + text='Function call with shell=True parameter identified, ' + 'possible security issue.', + lineno=context.get_lineno_for_call_arg('shell'), + ) + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B605') +def start_process_with_a_shell(context, config): + """**B605: Test for starting a process with a shell** + + Python possesses many mechanisms to invoke an external executable. However, + doing so may present a security issue if appropriate care is not taken to + sanitize any user provided or variable input. + + This plugin test is part of a family of tests built to check for process + spawning and warn appropriately. Specifically, this test looks for the + spawning of a subprocess using a command shell. This type of subprocess + invocation is dangerous as it is vulnerable to various shell injection + attacks. Great care should be taken to sanitize all input in order to + mitigate this risk. Calls of this type are identified by the use of certain + commands which are known to use shells. Bandit will report a LOW + severity warning. + + See also: + + - :doc:`../plugins/linux_commands_wildcard_injection` + - :doc:`../plugins/subprocess_without_shell_equals_true` + - :doc:`../plugins/start_process_with_no_shell` + - :doc:`../plugins/start_process_with_partial_path` + - :doc:`../plugins/subprocess_popen_with_shell_equals_true` + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`. They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + This plugin specifically scans for methods listed in `shell` section. + + .. code-block:: yaml + + shell_injection: + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + + :Example: + + .. code-block:: none + + >> Issue: Starting a process with a shell: check for injection. + Severity: Low Confidence: Medium + Location: examples/os_system.py:3 + 2 + 3 os.system('/bin/echo hi') + + .. seealso:: + + - https://security.openstack.org + - https://docs.python.org/2/library/os.html#os.system + - https://docs.python.org/2/library/subprocess.html#frequently-used-arguments # noqa + - https://security.openstack.org/guidelines/dg_use-subprocess-securely.html + + .. 
versionadded:: 0.10.0 + """ + if config and context.call_function_name_qual in config['shell']: + if len(context.call_args) > 0: + sev = _evaluate_shell_call(context) + if sev == bandit.LOW: + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text='Starting a process with a shell: ' + 'Seems safe, but may be changed in the future, ' + 'consider rewriting without shell' + ) + else: + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text='Starting a process with a shell, possible injection' + ' detected, security issue.' + ) + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B606') +def start_process_with_no_shell(context, config): + """**B606: Test for starting a process with no shell** + + Python possesses many mechanisms to invoke an external executable. However, + doing so may present a security issue if appropriate care is not taken to + sanitize any user provided or variable input. + + This plugin test is part of a family of tests built to check for process + spawning and warn appropriately. Specifically, this test looks for the + spawning of a subprocess in a way that doesn't use a shell. Although this + is generally safe, it may be useful for penetration testing workflows to + track where external system calls are used. As such, a LOW severity message + is generated. + + See also: + + - :doc:`../plugins/linux_commands_wildcard_injection` + - :doc:`../plugins/subprocess_without_shell_equals_true` + - :doc:`../plugins/start_process_with_a_shell` + - :doc:`../plugins/start_process_with_partial_path` + - :doc:`../plugins/subprocess_popen_with_shell_equals_true` + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`. They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + This plugin specifically scans for methods listed in `no_shell` section. + + .. code-block:: yaml + + shell_injection: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + + :Example: + + .. code-block:: none + + >> Issue: [start_process_with_no_shell] Starting a process without a + shell. + Severity: Low Confidence: Medium + Location: examples/os-spawn.py:8 + 7 os.spawnv(mode, path, args) + 8 os.spawnve(mode, path, args, env) + 9 os.spawnvp(mode, file, args) + + .. seealso:: + + - https://security.openstack.org + - https://docs.python.org/2/library/os.html#os.system + - https://docs.python.org/2/library/subprocess.html#frequently-used-arguments # noqa + - https://security.openstack.org/guidelines/dg_use-subprocess-securely.html + + .. versionadded:: 0.10.0 + """ + + if config and context.call_function_name_qual in config['no_shell']: + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.MEDIUM, + text='Starting a process without a shell.' + ) + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B607') +def start_process_with_partial_path(context, config): + """**B607: Test for starting a process with a partial path** + + Python possesses many mechanisms to invoke an external executable.
If the + desired executable path is not fully qualified relative to the filesystem + root then this may present a potential security risk. + + In POSIX environments, the `PATH` environment variable is used to specify a + set of standard locations that will be searched for the first matching + named executable. While convenient, this behavior may allow a malicious + actor to exert control over a system. If they are able to adjust the + contents of the `PATH` variable, or manipulate the file system, then a + bogus executable may be discovered in place of the desired one. This + executable will be invoked with the user privileges of the Python process + that spawned it, potentially a highly privileged user. + + This test will scan the parameters of all configured Python methods, + looking for paths that do not start at the filesystem root, that is, do not + have a leading '/' character. + + **Config Options:** + + This plugin test shares a configuration with others in the same family, + namely `shell_injection`. This configuration is divided up into three + sections, `subprocess`, `shell` and `no_shell`. They each list Python calls + that spawn subprocesses, invoke commands within a shell, or invoke commands + without a shell (by replacing the calling process) respectively. + + This test will scan parameters of all methods in all sections. Note that + methods are fully qualified and de-aliased prior to checking. + + .. code-block:: yaml + + shell_injection: + # Start a process using the subprocess module, or one of its + wrappers. + subprocess: + - subprocess.Popen + - subprocess.call + + # Start a process with a function vulnerable to shell injection. + shell: + - os.system + - os.popen + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + # Start a process with a function that is not vulnerable to shell + injection. + no_shell: + - os.execl + - os.execle + + + :Example: + + .. code-block:: none + + >> Issue: Starting a process with a partial executable path + Severity: Low Confidence: High + Location: ./examples/partial_path_process.py:3 + 2 from subprocess import Popen as pop + 3 pop('gcc --version', shell=False) + + .. seealso:: + + - https://security.openstack.org + - https://docs.python.org/2/library/os.html#process-management + + .. versionadded:: 0.13.0 + """ + + if config and len(context.call_args): + if(context.call_function_name_qual in config['subprocess'] or + context.call_function_name_qual in config['shell'] or + context.call_function_name_qual in config['no_shell']): + + node = context.node.args[0] + # some calls take an arg list, check the first part + if isinstance(node, ast.List): + node = node.elts[0] + + # make sure the param is a string literal and not a var name + if isinstance(node, ast.Str) and not full_path_match.match(node.s): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text='Starting a process with a partial executable path' + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/injection_sql.py b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_sql.py new file mode 100644 index 0000000..afb63a5 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_sql.py @@ -0,0 +1,115 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================ +B608: Test for SQL injection +============================ + +An SQL injection attack consists of insertion or "injection" of a SQL query via +the input data given to an application. It is a very common attack vector. This +plugin test looks for strings that resemble SQL statements that are involved in +some form of string building operation. For example: + + - "SELECT %s FROM derp;" % var + - "SELECT thing FROM " + tab + - "SELECT " + val + " FROM " + tab + ... + - "SELECT {} FROM derp;".format(var) + +Unless care is taken to sanitize and control the input data when building such +SQL statement strings, an injection attack becomes possible. If strings of this +nature are discovered, a LOW confidence issue is reported. In order to boost +result confidence, this plugin test will also check to see if the discovered +string is in use with standard Python DBAPI calls `execute` or `executemany`. +If so, a MEDIUM issue is reported. For example: + + - cursor.execute("SELECT %s FROM derp;" % var) + + +:Example: + +.. code-block:: none + + >> Issue: Possible SQL injection vector through string-based query + construction. + Severity: Medium Confidence: Low + Location: ./examples/sql_statements_without_sql_alchemy.py:4 + 3 query = "DELETE FROM foo WHERE id = '%s'" % identifier + 4 query = "UPDATE foo SET value = 'b' WHERE id = '%s'" % identifier + 5 + +.. seealso:: + + - https://www.owasp.org/index.php/SQL_Injection + - https://security.openstack.org/guidelines/dg_parameterize-database-queries.html # noqa + +.. versionadded:: 0.9.0 + +""" + +import ast +import re + +import bandit +from bandit.core import test_properties as test +from bandit.core import utils + +SIMPLE_SQL_RE = re.compile( + r'(select\s.*from\s|' + r'delete\s+from\s|' + r'insert\s+into\s.*values\s|' + r'update\s.*set\s)', + re.IGNORECASE | re.DOTALL, +) + + +def _check_string(data): + return SIMPLE_SQL_RE.search(data) is not None + + +def _evaluate_ast(node): + wrapper = None + statement = '' + + if isinstance(node.parent, ast.BinOp): + out = utils.concat_string(node, node.parent) + wrapper = out[0].parent + statement = out[1] + elif (isinstance(node.parent, ast.Attribute) + and node.parent.attr == 'format'): + statement = node.s + # Hierarchy for "".format() is Wrapper -> Call -> Attribute -> Str + wrapper = node.parent.parent.parent + + if isinstance(wrapper, ast.Call): # wrapped in "execute" call? + names = ['execute', 'executemany'] + name = utils.get_called_name(wrapper) + return (name in names, statement) + else: + return (False, statement) + + +@test.checks('Str') +@test.test_id('B608') +def hardcoded_sql_expressions(context): + val = _evaluate_ast(context.node) + if _check_string(val[1]): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM if val[0] else bandit.LOW, + text="Possible SQL injection vector through string-based " + "query construction." 
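+ # Confidence is raised to MEDIUM only when the string was seen flowing into an execute()/executemany() call (val[0]); otherwise it stays LOW.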
+ ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/injection_wildcard.py b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_wildcard.py new file mode 100644 index 0000000..9f99082 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/injection_wildcard.py @@ -0,0 +1,149 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +======================================== +B609: Test for use of wildcard injection +======================================== + +Python provides a number of methods that emulate the behavior of standard Linux +command line utilities. Like their Linux counterparts, these commands may take +a wildcard "\*" character in place of a file system path. This is interpreted +to mean "any and all files or folders" and can be used to build partially +qualified paths, such as "/home/user/\*". + +The use of partially qualified paths may result in unintended consequences if +an unexpected file or symlink is placed into the path location given. This +becomes particularly dangerous when combined with commands used to manipulate +file permissions or copy data off of a system. + +This test plugin looks for usage of the following commands in conjunction with +wild card parameters: + +- 'chown' +- 'chmod' +- 'tar' +- 'rsync' + +As well as any method configured in the shell or subprocess injection test +configurations. + + +**Config Options:** + +This plugin test shares a configuration with others in the same family, namely +`shell_injection`. This configuration is divided up into three sections, +`subprocess`, `shell` and `no_shell`. They each list Python calls that spawn +subprocesses, invoke commands within a shell, or invoke commands without a +shell (by replacing the calling process) respectively. + +This test will scan parameters of all methods in all sections. Note that +methods are fully qualified and de-aliased prior to checking. + + +.. code-block:: yaml + + shell_injection: + # Start a process using the subprocess module, or one of its wrappers. + subprocess: + - subprocess.Popen + - subprocess.call + + # Start a process with a function vulnerable to shell injection. + shell: + - os.system + - os.popen + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + # Start a process with a function that is not vulnerable to shell + injection. + no_shell: + - os.execl + - os.execle + + +:Example: + +.. code-block:: none + + >> Issue: Possible wildcard injection in call: subprocess.Popen + Severity: High Confidence: Medium + Location: ./examples/wildcard-injection.py:8 + 7 o.popen2('/bin/chmod *') + 8 subp.Popen('/bin/chown *', shell=True) + 9 + + >> Issue: subprocess call - check for execution of untrusted input. + Severity: Low Confidence: High + Location: ./examples/wildcard-injection.py:11 + 10 # Not vulnerable to wildcard injection + 11 subp.Popen('/bin/rsync *') + 12 subp.Popen("/bin/chmod *") + + +.. 
seealso:: + + - https://security.openstack.org + - https://en.wikipedia.org/wiki/Wildcard_character + - http://www.defensecode.com/public/DefenseCode_Unix_WildCards_Gone_Wild.txt + +.. versionadded:: 0.9.0 + +""" + +import bandit +from bandit.core import test_properties as test +from bandit.plugins import injection_shell # NOTE(tkelsey): shared config + + +gen_config = injection_shell.gen_config + + +@test.takes_config('shell_injection') +@test.checks('Call') +@test.test_id('B609') +def linux_commands_wildcard_injection(context, config): + if not ('shell' in config and 'subprocess' in config): + return + + vulnerable_funcs = ['chown', 'chmod', 'tar', 'rsync'] + if context.call_function_name_qual in config['shell'] or ( + context.call_function_name_qual in config['subprocess'] and + context.check_call_arg_value('shell', 'True')): + if context.call_args_count >= 1: + call_argument = context.get_call_arg_at_position(0) + argument_string = '' + if isinstance(call_argument, list): + for li in call_argument: + argument_string = argument_string + ' %s' % li + elif isinstance(call_argument, str): + argument_string = call_argument + + if argument_string != '': + for vulnerable_func in vulnerable_funcs: + if( + vulnerable_func in argument_string and + '*' in argument_string + ): + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.MEDIUM, + text="Possible wildcard injection in call: %s" % + context.call_function_name_qual, + lineno=context.get_lineno_for_call_arg('shell'), + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/insecure_ssl_tls.py b/env_27/lib/python2.7/site-packages/bandit/plugins/insecure_ssl_tls.py new file mode 100644 index 0000000..eaf6502 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/insecure_ssl_tls.py @@ -0,0 +1,263 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import bandit +from bandit.core import test_properties as test + + +def get_bad_proto_versions(config): + return config['bad_protocol_versions'] + + +def gen_config(name): + if name == 'ssl_with_bad_version': + return {'bad_protocol_versions': + ['PROTOCOL_SSLv2', + 'SSLv2_METHOD', + 'SSLv23_METHOD', + 'PROTOCOL_SSLv3', # strict option + 'PROTOCOL_TLSv1', # strict option + 'SSLv3_METHOD', # strict option + 'TLSv1_METHOD']} # strict option + + +@test.takes_config +@test.checks('Call') +@test.test_id('B502') +def ssl_with_bad_version(context, config): + """**B502: Test for SSL use with bad version used** + + Several highly publicized exploitable flaws have been discovered + in all versions of SSL and early versions of TLS. It is strongly + recommended that use of the following known broken protocol versions be + avoided: + + - SSL v2 + - SSL v3 + - TLS v1 + - TLS v1.1 + + This plugin test scans for calls to Python methods with parameters that + indicate the used broken SSL/TLS protocol versions. 
Currently, detection + supports methods using Python's native SSL/TLS support and the pyOpenSSL + module. A HIGH severity warning will be reported whenever known broken + protocol versions are detected. + + It is worth noting that native support for TLS 1.2 is only available in + more recent Python versions, specifically 2.7.9 and up, and 3.x. + + A note on 'SSLv23': + + Amongst the available SSL/TLS versions provided by Python/pyOpenSSL there + exists the option to use SSLv23. This very poorly named option actually + means "use the highest version of SSL/TLS supported by both the server and + client". This may be (and should be) a version well in advance of SSL v2 or + v3. Bandit can scan for the use of SSLv23 if desired, but its detection + does not necessarily indicate a problem. + + When using SSLv23, it is important to also provide flags to explicitly + exclude bad versions of SSL/TLS from the protocol versions considered. Both + the Python native and pyOpenSSL modules provide the ``OP_NO_SSLv2`` and + ``OP_NO_SSLv3`` flags for this purpose. + + **Config Options:** + + .. code-block:: yaml + + ssl_with_bad_version: + bad_protocol_versions: + - PROTOCOL_SSLv2 + - SSLv2_METHOD + - SSLv23_METHOD + - PROTOCOL_SSLv3 # strict option + - PROTOCOL_TLSv1 # strict option + - SSLv3_METHOD # strict option + - TLSv1_METHOD # strict option + + :Example: + + .. code-block:: none + + >> Issue: ssl.wrap_socket call with insecure SSL/TLS protocol version + identified, security issue. + Severity: High Confidence: High + Location: ./examples/ssl-insecure-version.py:13 + 12 # strict tests + 13 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3) + 14 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1) + + .. seealso:: + + - :func:`ssl_with_bad_defaults` + - :func:`ssl_with_no_version` + - http://heartbleed.com/ + - https://poodlebleed.com/ + - https://security.openstack.org/ + - https://security.openstack.org/guidelines/dg_move-data-securely.html + + ..
versionadded:: 0.9.0 + """ + bad_ssl_versions = get_bad_proto_versions(config) + if context.call_function_name_qual == 'ssl.wrap_socket': + if context.check_call_arg_value('ssl_version', bad_ssl_versions): + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="ssl.wrap_socket call with insecure SSL/TLS protocol " + "version identified, security issue.", + lineno=context.get_lineno_for_call_arg('ssl_version'), + ) + elif context.call_function_name_qual == 'pyOpenSSL.SSL.Context': + if context.check_call_arg_value('method', bad_ssl_versions): + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="SSL.Context call with insecure SSL/TLS protocol " + "version identified, security issue.", + lineno=context.get_lineno_for_call_arg('method'), + ) + + elif (context.call_function_name_qual != 'ssl.wrap_socket' and + context.call_function_name_qual != 'pyOpenSSL.SSL.Context'): + if (context.check_call_arg_value('method', bad_ssl_versions) or + context.check_call_arg_value('ssl_version', bad_ssl_versions)): + lineno = (context.get_lineno_for_call_arg('method') or + context.get_lineno_for_call_arg('ssl_version')) + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text="Function call with insecure SSL/TLS protocol " + "identified, possible security issue.", + lineno=lineno, + ) + + +@test.takes_config("ssl_with_bad_version") +@test.checks('FunctionDef') +@test.test_id('B503') +def ssl_with_bad_defaults(context, config): + """**B503: Test for SSL use with bad defaults specified** + + This plugin is part of a family of tests that detect the use of known bad + versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for + a complete discussion. Specifically, this plugin test scans for Python + methods with default parameter values that specify the use of broken + SSL/TLS protocol versions. Currently, detection supports methods using + Python's native SSL/TLS support and the pyOpenSSL module. A MEDIUM severity + warning will be reported whenever known broken protocol versions are + detected. + + **Config Options:** + + This test shares the configuration provided for the standard + :doc:`../plugins/ssl_with_bad_version` test, please refer to its + documentation. + + :Example: + + .. code-block:: none + + >> Issue: Function definition identified with insecure SSL/TLS protocol + version by default, possible security issue. + Severity: Medium Confidence: Medium + Location: ./examples/ssl-insecure-version.py:28 + 27 + 28 def open_ssl_socket(version=SSL.SSLv2_METHOD): + 29 pass + + .. seealso:: + + - :func:`ssl_with_bad_version` + - :func:`ssl_with_no_version` + - http://heartbleed.com/ + - https://poodlebleed.com/ + - https://security.openstack.org/ + - https://security.openstack.org/guidelines/dg_move-data-securely.html + + .. versionadded:: 0.9.0 + """ + + bad_ssl_versions = get_bad_proto_versions(config) + for default in context.function_def_defaults_qual: + val = default.split(".")[-1] + if val in bad_ssl_versions: + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.MEDIUM, + text="Function definition identified with insecure SSL/TLS " + "protocol version by default, possible security " + "issue." 
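+ # Triggered by insecure default arguments, e.g. def open_ssl_socket(version=SSL.SSLv2_METHOD) from the docstring example.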
+ ) + + +@test.checks('Call') +@test.test_id('B504') +def ssl_with_no_version(context): + """**B504: Test for SSL use with no version specified** + + This plugin is part of a family of tests that detect the use of known bad + versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for + a complete discussion. Specifically, this plugin test scans for specific + methods in Python's native SSL/TLS support and the pyOpenSSL module that + configure the version of SSL/TLS protocol to use. These methods are known + to provide default values that maximize compatibility, but permit use of the + aforementioned broken protocol versions. A LOW severity warning will be + reported whenever this is detected. + + **Config Options:** + + This test shares the configuration provided for the standard + :doc:`../plugins/ssl_with_bad_version` test, please refer to its + documentation. + + :Example: + + .. code-block:: none + + >> Issue: ssl.wrap_socket call with no SSL/TLS protocol version + specified, the default SSLv23 could be insecure, possible security + issue. + Severity: Low Confidence: Medium + Location: ./examples/ssl-insecure-version.py:23 + 22 + 23 ssl.wrap_socket() + 24 + + .. seealso:: + + - :func:`ssl_with_bad_version` + - :func:`ssl_with_bad_defaults` + - http://heartbleed.com/ + - https://poodlebleed.com/ + - https://security.openstack.org/ + - https://security.openstack.org/guidelines/dg_move-data-securely.html + + .. versionadded:: 0.9.0 + """ + if context.call_function_name_qual == 'ssl.wrap_socket': + if context.check_call_arg_value('ssl_version') is None: + # check_call_arg_value() returns False if the argument is found + # but does not match the supplied value (or the default None). + # It returns None if the arg_name passed doesn't exist. This + # tests for that (ssl_version is not specified). + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.MEDIUM, + text="ssl.wrap_socket call with no SSL/TLS protocol version " + "specified, the default SSLv23 could be insecure, " + "possible security issue.", + lineno=context.get_lineno_for_call_arg('ssl_version'), + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/jinja2_templates.py b/env_27/lib/python2.7/site-packages/bandit/plugins/jinja2_templates.py new file mode 100644 index 0000000..2adc77a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/jinja2_templates.py @@ -0,0 +1,131 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +========================================== +B701: Test for not auto escaping in jinja2 +========================================== + +Jinja2 is a Python HTML templating system. It is typically used to build web +applications, though it appears in other places as well, notably the Ansible +automation system. When configuring the Jinja2 environment, the option to use +autoescaping on input can be specified.
When autoescaping is enabled, Jinja2 +will filter input strings to escape any HTML content submitted via template +variables. Without escaping HTML input the application becomes vulnerable to +Cross Site Scripting (XSS) attacks. + +Unfortunately, autoescaping is False by default. Thus this plugin test will +warn on omission of an autoescape setting, as well as an explicit setting of +false. A HIGH severity warning is generated in either of these scenarios. + +:Example: + +.. code-block:: none + + >> Issue: Using jinja2 templates with autoescape=False is dangerous and can + lead to XSS. Use autoescape=True to mitigate XSS vulnerabilities. + Severity: High Confidence: High + Location: ./examples/jinja2_templating.py:11 + 10 templateEnv = jinja2.Environment(autoescape=False, + loader=templateLoader) + 11 Environment(loader=templateLoader, + 12 load=templateLoader, + 13 autoescape=False) + 14 + + >> Issue: By default, jinja2 sets autoescape to False. Consider using + autoescape=True or use the select_autoescape function to mitigate XSS + vulnerabilities. + Severity: High Confidence: High + Location: ./examples/jinja2_templating.py:15 + 14 + 15 Environment(loader=templateLoader, + 16 load=templateLoader) + 17 + 18 Environment(autoescape=select_autoescape(['html', 'htm', 'xml']), + 19 loader=templateLoader) + + +.. seealso:: + + - `OWASP XSS `_ + - https://realpython.com/blog/python/primer-on-jinja-templating/ + - http://jinja.pocoo.org/docs/dev/api/#autoescaping + - https://security.openstack.org + - https://security.openstack.org/guidelines/dg_cross-site-scripting-xss.html + +.. versionadded:: 0.10.0 + +""" + +import ast + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B701') +def jinja2_autoescape_false(context): + # check type just to be safe + if isinstance(context.call_function_name_qual, str): + qualname_list = context.call_function_name_qual.split('.') + func = qualname_list[-1] + if 'jinja2' in qualname_list and func == 'Environment': + for node in ast.walk(context.node): + if isinstance(node, ast.keyword): + # definite autoescape = False + if (getattr(node, 'arg', None) == 'autoescape' and + (getattr(node.value, 'id', None) == 'False' or + getattr(node.value, 'value', None) is False)): + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="Using jinja2 templates with autoescape=" + "False is dangerous and can lead to XSS. " + "Use autoescape=True or use the " + "select_autoescape function to mitigate XSS " + "vulnerabilities." + ) + # found autoescape + if getattr(node, 'arg', None) == 'autoescape': + value = getattr(node, 'value', None) + if (getattr(value, 'id', None) == 'True' or + getattr(value, 'value', None) is True): + return + # Check if select_autoescape function is used. + elif isinstance(value, ast.Call) and getattr( + value.func, 'id', None) == 'select_autoescape': + return + else: + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.MEDIUM, + text="Using jinja2 templates with autoescape=" + "False is dangerous and can lead to XSS. " + "Ensure autoescape=True or use the " + "select_autoescape function to mitigate " + "XSS vulnerabilities." + ) + # We haven't found a keyword named autoescape, indicating default + # behavior + return bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.HIGH, + text="By default, jinja2 sets autoescape to False. Consider " + "using autoescape=True or use the select_autoescape " + "function to mitigate XSS vulnerabilities." 
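+ # A safe construction from the docstring example: Environment(autoescape=select_autoescape(['html', 'htm', 'xml']), loader=templateLoader).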
+ ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/mako_templates.py b/env_27/lib/python2.7/site-packages/bandit/plugins/mako_templates.py new file mode 100644 index 0000000..cf315c6 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/mako_templates.py @@ -0,0 +1,76 @@ +# -*- coding:utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +==================================== +B702: Test for use of mako templates +==================================== + +Mako is a Python templating system often used to build web applications. It is +the default templating system used in Pylons and Pyramid. Unlike Jinja2 (an +alternative templating system), Mako has no environment wide variable escaping +mechanism. Because of this, all input variables must be carefully escaped +before use to prevent possible vulnerabilities to Cross Site Scripting (XSS) +attacks. + + +:Example: + +.. code-block:: none + + >> Issue: Mako templates allow HTML/JS rendering by default and are + inherently open to XSS attacks. Ensure variables in all templates are + properly sanitized via the 'n', 'h' or 'x' flags (depending on context). + For example, to HTML escape the variable 'data' do ${ data |h }. + Severity: Medium Confidence: High + Location: ./examples/mako_templating.py:10 + 9 + 10 mako.template.Template("hern") + 11 template.Template("hern") + + +.. seealso:: + + - http://www.makotemplates.org/ + - `OWASP XSS `_ + - https://security.openstack.org + - https://security.openstack.org/guidelines/dg_cross-site-scripting-xss.html + +.. versionadded:: 0.10.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B702') +def use_of_mako_templates(context): + # check type just to be safe + if isinstance(context.call_function_name_qual, str): + qualname_list = context.call_function_name_qual.split('.') + func = qualname_list[-1] + if 'mako' in qualname_list and func == 'Template': + # unlike Jinja2, mako does not have a template wide autoescape + # feature and thus each variable must be carefully sanitized. + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.HIGH, + text="Mako templates allow HTML/JS rendering by default and " + "are inherently open to XSS attacks. Ensure variables " + "in all templates are properly sanitized via the 'n', " + "'h' or 'x' flags (depending on context). For example, " + "to HTML escape the variable 'data' do ${ data |h }." + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/ssh_no_host_key_verification.py b/env_27/lib/python2.7/site-packages/bandit/plugins/ssh_no_host_key_verification.py new file mode 100644 index 0000000..af5e23f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/ssh_no_host_key_verification.py @@ -0,0 +1,65 @@ +# Copyright (c) 2018 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +========================================== +B507: Test for missing host key validation +========================================== + +Encryption in general is typically critical to the security of many +applications. Using SSH can greatly increase security by guaranteeing the +identity of the party you are communicating with. This is accomplished by one +or both parties presenting trusted host keys during the connection +initialization phase of SSH. + +When paramiko methods are used, host keys are verified by default. If host key +verification is disabled, Bandit will return a HIGH severity error. + +:Example: + +.. code-block:: none + + >> Issue: [B507:ssh_no_host_key_verification] Paramiko call with policy set + to automatically trust the unknown host key. + Severity: High Confidence: Medium + Location: examples/no_host_key_verification.py:4 + 3 ssh_client = client.SSHClient() + 4 ssh_client.set_missing_host_key_policy(client.AutoAddPolicy) + 5 ssh_client.set_missing_host_key_policy(client.WarningPolicy) + + +.. versionadded:: 1.5.1 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.checks('Call') +@test.test_id('B507') +def ssh_no_host_key_verification(context): + if (context.is_module_imported_like('paramiko') and + context.call_function_name == 'set_missing_host_key_policy'): + if (context.call_args and + context.call_args[0] in ['AutoAddPolicy', 'WarningPolicy']): + issue = bandit.Issue( + severity=bandit.HIGH, + confidence=bandit.MEDIUM, + text='Paramiko call with policy set to automatically trust ' + 'the unknown host key.', + lineno=context.get_lineno_for_call_arg( + 'set_missing_host_key_policy'), + ) + return issue diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_continue.py b/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_continue.py new file mode 100644 index 0000000..1c70c54 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_continue.py @@ -0,0 +1,110 @@ +# Copyright 2016 IBM Corp. +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +============================================= +B112: Test for a continue in the except block +============================================= + +Errors in Python code bases are typically communicated using ``Exceptions``. +An exception object is 'raised' in the event of an error and can be 'caught' at +a later point in the program, typically some error handling or logging action +will then be performed. 
+ + However, it is possible to catch an exception and silently ignore it while in + a loop. This is illustrated with the following example + + .. code-block:: python + + while keep_going: + try: + do_some_stuff() + except Exception: + continue + + This pattern is considered bad practice in general, but also represents a + potential security issue. A larger than normal volume of errors from a service + can indicate an attempt is being made to disrupt or interfere with it. Thus + errors should, at the very least, be logged. + + There are rare situations where it is desirable to suppress errors, but this is + typically done with specific exception types, rather than the base Exception + class (or no type). To accommodate this, the test may be configured to ignore + 'try, except, continue' where the exception is typed. For example, the + following would not generate a warning if the configuration option + ``check_typed_exception`` is set to False: + + .. code-block:: python + + while keep_going: + try: + do_some_stuff() + except ZeroDivisionError: + continue + + **Config Options:** + + .. code-block:: yaml + + try_except_continue: + check_typed_exception: True + + + :Example: + + .. code-block:: none + + >> Issue: Try, Except, Continue detected. + Severity: Low Confidence: High + Location: ./examples/try_except_continue.py:5 + 4 a = i + 5 except: + 6 continue + + .. seealso:: + + - https://security.openstack.org + + .. versionadded:: 1.0.0 + + """ + + import ast + + import bandit + from bandit.core import test_properties as test + + + def gen_config(name): + if name == 'try_except_continue': + return {'check_typed_exception': False} + + + @test.takes_config + @test.checks('ExceptHandler') + @test.test_id('B112') + def try_except_continue(context, config): + node = context.node + if len(node.body) == 1: + if (not config['check_typed_exception'] and + node.type is not None and + getattr(node.type, 'id', None) != 'Exception'): + return + + if isinstance(node.body[0], ast.Continue): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text=("Try, Except, Continue detected.")) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_pass.py b/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_pass.py new file mode 100644 index 0000000..9c82e84 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/try_except_pass.py @@ -0,0 +1,110 @@ +# -*- coding:utf-8 -*- +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +========================================= +B110: Test for a pass in the except block +========================================= + +Errors in Python code bases are typically communicated using ``Exceptions``. +An exception object is 'raised' in the event of an error and can be 'caught' at +a later point in the program, typically some error handling or logging action +will then be performed. + +However, it is possible to catch an exception and silently ignore it.
This is + illustrated with the following example + + .. code-block:: python + + try: + do_some_stuff() + except Exception: + pass + + This pattern is considered bad practice in general, but also represents a + potential security issue. A larger than normal volume of errors from a service + can indicate an attempt is being made to disrupt or interfere with it. Thus + errors should, at the very least, be logged. + + There are rare situations where it is desirable to suppress errors, but this is + typically done with specific exception types, rather than the base Exception + class (or no type). To accommodate this, the test may be configured to ignore + 'try, except, pass' where the exception is typed. For example, the following + would not generate a warning if the configuration option + ``check_typed_exception`` is set to False: + + .. code-block:: python + + try: + do_some_stuff() + except ZeroDivisionError: + pass + + **Config Options:** + + .. code-block:: yaml + + try_except_pass: + check_typed_exception: True + + + :Example: + + .. code-block:: none + + >> Issue: Try, Except, Pass detected. + Severity: Low Confidence: High + Location: ./examples/try_except_pass.py:4 + 3 a = 1 + 4 except: + 5 pass + + .. seealso:: + + - https://security.openstack.org + + .. versionadded:: 0.13.0 + + """ + + import ast + + import bandit + from bandit.core import test_properties as test + + + def gen_config(name): + if name == 'try_except_pass': + return {'check_typed_exception': False} + + + @test.takes_config + @test.checks('ExceptHandler') + @test.test_id('B110') + def try_except_pass(context, config): + node = context.node + if len(node.body) == 1: + if (not config['check_typed_exception'] and + node.type is not None and + getattr(node.type, 'id', None) != 'Exception'): + return + + if isinstance(node.body[0], ast.Pass): + return bandit.Issue( + severity=bandit.LOW, + confidence=bandit.HIGH, + text=("Try, Except, Pass detected.") + ) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/weak_cryptographic_key.py b/env_27/lib/python2.7/site-packages/bandit/plugins/weak_cryptographic_key.py new file mode 100644 index 0000000..1c80e64 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/weak_cryptographic_key.py @@ -0,0 +1,140 @@ +# Copyright (c) 2015 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +========================================= +B505: Test for weak cryptographic key use +========================================= + +As computational power increases, so does the ability to break ciphers with +smaller key lengths. The recommended key length size for RSA and DSA algorithms +is 2048 and higher. 1024 bits and below are now considered breakable. EC key +length sizes are recommended to be 224 and higher with 160 and below considered +breakable. This plugin test checks for use of any key less than those limits +and returns a high severity error if the key is below the lower threshold and a +medium severity error if it is below the higher threshold. + +:Example: + +..
code-block:: none + + >> Issue: DSA key sizes below 1024 bits are considered breakable. + Severity: High Confidence: High + Location: examples/weak_cryptographic_key_sizes.py:36 + 35 # Also incorrect: without keyword args + 36 dsa.generate_private_key(512, + 37 backends.default_backend()) + 38 rsa.generate_private_key(3, + +.. seealso:: + + - http://csrc.nist.gov/publications/nistpubs/800-131A/sp800-131A.pdf + - https://security.openstack.org/guidelines/dg_strong-crypto.html + +.. versionadded:: 0.14.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +def gen_config(name): + if name == 'weak_cryptographic_key': + return { + 'weak_key_size_dsa_high': 1024, + 'weak_key_size_dsa_medium': 2048, + 'weak_key_size_rsa_high': 1024, + 'weak_key_size_rsa_medium': 2048, + 'weak_key_size_ec_high': 160, + 'weak_key_size_ec_medium': 224, + } + + +def _classify_key_size(config, key_type, key_size): + if isinstance(key_size, str): + # size provided via a variable - can't process it at the moment + return + + key_sizes = { + 'DSA': [(config['weak_key_size_dsa_high'], bandit.HIGH), + (config['weak_key_size_dsa_medium'], bandit.MEDIUM)], + 'RSA': [(config['weak_key_size_rsa_high'], bandit.HIGH), + (config['weak_key_size_rsa_medium'], bandit.MEDIUM)], + 'EC': [(config['weak_key_size_ec_high'], bandit.HIGH), + (config['weak_key_size_ec_medium'], bandit.MEDIUM)], + } + + for size, level in key_sizes[key_type]: + if key_size < size: + return bandit.Issue( + severity=level, + confidence=bandit.HIGH, + text='%s key sizes below %d bits are considered breakable. ' % + (key_type, size)) + + +def _weak_crypto_key_size_cryptography_io(context, config): + func_key_type = { + 'cryptography.hazmat.primitives.asymmetric.dsa.' + 'generate_private_key': 'DSA', + 'cryptography.hazmat.primitives.asymmetric.rsa.' + 'generate_private_key': 'RSA', + 'cryptography.hazmat.primitives.asymmetric.ec.' 
+ 'generate_private_key': 'EC', + } + arg_position = { + 'DSA': 0, + 'RSA': 1, + 'EC': 0, + } + key_type = func_key_type.get(context.call_function_name_qual) + if key_type in ['DSA', 'RSA']: + key_size = (context.get_call_arg_value('key_size') or + context.get_call_arg_at_position(arg_position[key_type]) or + 2048) + return _classify_key_size(config, key_type, key_size) + elif key_type == 'EC': + curve_key_sizes = { + 'SECP192R1': 192, + 'SECT163K1': 163, + 'SECT163R2': 163, + } + curve = (context.get_call_arg_value('curve') or + context.call_args[arg_position[key_type]]) + key_size = curve_key_sizes[curve] if curve in curve_key_sizes else 224 + return _classify_key_size(config, key_type, key_size) + + +def _weak_crypto_key_size_pycrypto(context, config): + func_key_type = { + 'Crypto.PublicKey.DSA.generate': 'DSA', + 'Crypto.PublicKey.RSA.generate': 'RSA', + 'Cryptodome.PublicKey.DSA.generate': 'DSA', + 'Cryptodome.PublicKey.RSA.generate': 'RSA', + } + key_type = func_key_type.get(context.call_function_name_qual) + if key_type: + key_size = (context.get_call_arg_value('bits') or + context.get_call_arg_at_position(0) or + 2048) + return _classify_key_size(config, key_type, key_size) + + +@test.takes_config +@test.checks('Call') +@test.test_id('B505') +def weak_cryptographic_key(context, config): + return (_weak_crypto_key_size_cryptography_io(context, config) or + _weak_crypto_key_size_pycrypto(context, config)) diff --git a/env_27/lib/python2.7/site-packages/bandit/plugins/yaml_load.py b/env_27/lib/python2.7/site-packages/bandit/plugins/yaml_load.py new file mode 100644 index 0000000..da2353b --- /dev/null +++ b/env_27/lib/python2.7/site-packages/bandit/plugins/yaml_load.py @@ -0,0 +1,71 @@ +# -*- coding:utf-8 -*- +# +# Copyright (c) 2016 Rackspace, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +=============================== +B506: Test for use of yaml load +=============================== + +This plugin test checks for the unsafe usage of the ``yaml.load`` function from +the PyYAML package. The yaml.load function provides the ability to construct +an arbitrary Python object, which may be dangerous if you receive a YAML +document from an untrusted source. The function yaml.safe_load limits this +ability to simple Python objects like integers or lists. + +Please see +http://pyyaml.org/wiki/PyYAMLDocumentation#LoadingYAML for more information +on ``yaml.load`` and yaml.safe_load + +:Example: + + >> Issue: [yaml_load] Use of unsafe yaml load. Allows instantiation of + arbitrary objects. Consider yaml.safe_load(). + Severity: Medium Confidence: High + Location: examples/yaml_load.py:5 + 4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3}) + 5 y = yaml.load(ystr) + 6 yaml.dump(y) + + +.. seealso:: + + - http://pyyaml.org/wiki/PyYAMLDocumentation#LoadingYAML + +.. 
versionadded:: 1.0.0 + +""" + +import bandit +from bandit.core import test_properties as test + + +@test.test_id('B506') +@test.checks('Call') +def yaml_load(context): + imported = context.is_module_imported_exact('yaml') + qualname = context.call_function_name_qual + if imported and isinstance(qualname, str): + qualname_list = qualname.split('.') + func = qualname_list[-1] + if 'yaml' in qualname_list and func == 'load': + if not context.check_call_arg_value('Loader', 'SafeLoader'): + return bandit.Issue( + severity=bandit.MEDIUM, + confidence=bandit.HIGH, + text="Use of unsafe yaml load. Allows instantiation of" + " arbitrary objects. Consider yaml.safe_load().", + lineno=context.node.lineno, + ) diff --git a/env_27/lib/python2.7/site-packages/concurrent/__init__.py b/env_27/lib/python2.7/site-packages/concurrent/__init__.py new file mode 100644 index 0000000..b36383a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/concurrent/__init__.py @@ -0,0 +1,3 @@ +from pkgutil import extend_path + +__path__ = extend_path(__path__, __name__) diff --git a/env_27/lib/python2.7/site-packages/concurrent/futures/__init__.py b/env_27/lib/python2.7/site-packages/concurrent/futures/__init__.py new file mode 100644 index 0000000..428b14b --- /dev/null +++ b/env_27/lib/python2.7/site-packages/concurrent/futures/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + Future, + Executor, + wait, + as_completed) +from concurrent.futures.thread import ThreadPoolExecutor + +try: + from concurrent.futures.process import ProcessPoolExecutor +except ImportError: + # some platforms don't have multiprocessing + pass diff --git a/env_27/lib/python2.7/site-packages/concurrent/futures/_base.py b/env_27/lib/python2.7/site-packages/concurrent/futures/_base.py new file mode 100644 index 0000000..510ffa5 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/concurrent/futures/_base.py @@ -0,0 +1,667 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +import collections +import logging +import threading +import itertools +import time +import types + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. 
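The B506 ``yaml_load`` check above only stays quiet when a call opts into a safe loader. As a rough sketch of the spellings it distinguishes (the input string here is just a stand-in for untrusted data)::

    import yaml

    untrusted = "a: 1\nb: [2, 3]"

    # Would be flagged: the full loader can construct arbitrary
    # Python objects from tagged YAML nodes.
    risky = yaml.load(untrusted)

    # Not flagged: safe_load restricts construction to plain
    # scalars, lists and dicts.
    safe = yaml.safe_load(untrusted)

    # Also accepted, via the check's Loader-argument test.
    explicit = yaml.load(untrusted, Loader=yaml.SafeLoader)
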
+LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super(_FirstCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + super(_FirstCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super(_FirstCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super(_AllCompletedWaiter, self).__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super(_AllCompletedWaiter, self).add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super(_AllCompletedWaiter, self).add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super(_AllCompletedWaiter, self).add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in 
fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs,)): + f = [f] + yield f.pop() + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)): + f = [f] + yield f.pop() + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. 
+ """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._traceback = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + except BaseException: + # Explicitly let all other new-style exceptions through so + # that we can catch all old-style exceptions with a simple + # "except:" clause below. + # + # All old-style exception objects are instances of + # types.InstanceType, but "except types.InstanceType:" does + # not catch old-style exceptions for some reason. Thus, the + # only way to catch all old-style exceptions without catching + # any new-style exceptions is to filter out the new-style + # exceptions, which all derive from BaseException. + raise + except: + # Because of the BaseException clause above, this handler only + # executes for old-style exception objects. + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + if isinstance(self._exception, types.InstanceType): + # The exception is an instance of an old-style class, which + # means type(self._exception) returns types.ClassType instead + # of the exception's actual class type. + exception_type = self._exception.__class__ + else: + exception_type = type(self._exception) + raise exception_type, self._exception, self._traceback + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception_info(self, timeout=None): + """Return a tuple of (exception, traceback) raised by the call that the + future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. 
+ """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + return self.exception_info(timeout)[0] + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception_info(self, exception, traceback): + """Sets the result of the future as being the given exception + and traceback. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._traceback = traceback + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. 
+ """ + self.set_exception_info(exception, None) + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get('timeout') + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + # Careful not to keep a reference to the popped future + if timeout is None: + yield fs.pop().result() + else: + yield fs.pop().result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/env_27/lib/python2.7/site-packages/concurrent/futures/process.py b/env_27/lib/python2.7/site-packages/concurrent/futures/process.py new file mode 100644 index 0000000..fa5b96f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/concurrent/futures/process.py @@ -0,0 +1,363 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... 
| | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Request Q" +""" + +import atexit +from concurrent.futures import _base +import Queue as queue +import multiprocessing +import threading +import weakref +import sys + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). +EXTRA_QUEUED_CALLS = 1 + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. 
+ """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(None) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except: + e = sys.exc_info()[1] + result_queue.put(_ResultItem(call_item.work_id, + exception=e)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. + """ + nb_shutdown_processes = [0] + def shutdown_one_process(): + """Tell a worker to terminate, which will in turn wake us again""" + call_queue.put(None) + nb_shutdown_processes[0] += 1 + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + result_item = result_queue.get(block=True) + if result_item is not None: + work_item = pending_work_items[result_item.work_id] + del pending_work_items[result_item.work_id] + + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Delete references to object. See issue16284 + del work_item + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if _shutdown or executor is None or executor._shutdown_thread: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. 
+ if not pending_work_items: + while nb_shutdown_processes[0] < len(processes): + shutdown_one_process() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS + # X. + for p in processes: + p.join() + call_queue.close() + return + del executor + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import os + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermine limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = multiprocessing.cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + self._result_queue = multiprocessing.Queue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + self._processes = set() + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. 
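``weakref_cb`` just below is the wake-up hook that comment describes: it holds no strong reference to the executor, and once the executor is collected it pushes a sentinel so the blocked manager thread re-checks its exit condition. The mechanism in isolation, under hypothetical names::

    import weakref
    import Queue as queue

    class Owner(object):
        pass

    wakeup = queue.Queue()
    owner = Owner()

    # The callback fires when `owner` is garbage collected; the default
    # argument keeps the queue reachable without keeping `owner` alive.
    _ref = weakref.ref(owner, lambda _, q=wakeup: q.put(None))

    del owner  # CPython's refcounting collects it immediately
    assert wakeup.get() is None

The default-argument trick matters: closing over the owner itself would keep it alive, and the callback would never fire.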
+ def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes.add(p) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + self._adjust_process_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join(sys.maxint) + # To reduce the risk of openning too many files, remove references to + # objects that use file descriptors. + self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/env_27/lib/python2.7/site-packages/concurrent/futures/thread.py b/env_27/lib/python2.7/site-packages/concurrent/futures/thread.py new file mode 100644 index 0000000..bb0ce9d --- /dev/null +++ b/env_27/lib/python2.7/site-packages/concurrent/futures/thread.py @@ -0,0 +1,160 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +import atexit +from concurrent.futures import _base +import itertools +import Queue as queue +import threading +import weakref +import sys + +try: + from multiprocessing import cpu_count +except ImportError: + # some platforms don't have multiprocessing + def cpu_count(): + return None + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. 
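As a freestanding sketch of the exit-handler arrangement described above (names hypothetical): the worker runs as a daemon thread, and an ``atexit`` hook delivers a sentinel and joins it, so even without an explicit ``shutdown()`` queued work drains before the interpreter tears down::

    import atexit
    import threading
    import Queue as queue

    work_queue = queue.Queue()

    def worker():
        while True:
            item = work_queue.get(block=True)
            if item is None:  # sentinel delivered by the exit handler
                return
            item()

    t = threading.Thread(target=worker)
    t.daemon = True  # lets the interpreter exit while the thread idles
    t.start()

    def _on_exit():
        work_queue.put(None)  # ask the worker to drain and stop
        t.join()

    atexit.register(_on_exit)

The module below does the same thing for every executor's workers, keyed by worker thread in the ``_threads_queues`` weak dictionary so dead threads drop out automatically.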
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except: + e, tb = sys.exc_info()[1:] + self.future.set_exception_info(e, tb) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except: + _base.LOGGER.critical('Exception in worker', exc_info=True) + + +class ThreadPoolExecutor(_base.Executor): + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().next + + def __init__(self, max_workers=None, thread_name_prefix=''): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + """ + if max_workers is None: + # Use this number because ThreadPoolExecutor is often + # used to overlap I/O instead of CPU work. + max_workers = (cpu_count() or 1) * 5 + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self._work_queue): + q.put(None) + # TODO(bquinlan): Should avoid creating new threads if there are more + # idle threads than items in the work queue. 
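Seen from the caller's side this grow-on-demand policy is invisible; typical use of the backport is the standard executor idiom (the workload here is hypothetical)::

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def square(n):
        return n * n

    # The context manager calls shutdown(wait=True) on exit.
    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(square, n) for n in range(10)]
        for future in as_completed(futures):
            print(future.result())
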
+ num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join(sys.maxint) + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/DESCRIPTION.rst b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..7b23dd6 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/DESCRIPTION.rst @@ -0,0 +1,134 @@ +.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +=========== +Coverage.py +=========== + +Code coverage testing for Python. + +| |license| |versions| |status| |docs| +| |ci-status| |win-ci-status| |codecov| +| |kit| |format| |repos| +| |tidelift| |saythanks| + +.. downloads badge seems to be broken... |downloads| + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png + :width: 75 + :alt: Tidelift + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for coverage.py is available as part of the `Tidelift + Subscription`_. Tidelift gives software development teams a single source for + purchasing and maintaining their software, with professional grade assurances + from the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + +Coverage.py runs on many versions of Python: + +* CPython 2.6, 2.7 and 3.3 through alpha 3.8. +* PyPy2 6.0 and PyPy3 6.0. +* Jython 2.7.1, though not for reporting. +* IronPython 2.7.7, though not for reporting. + +Documentation is on `Read the Docs`_. Code repository and issue tracker are on +`GitHub`_. + +.. _Read the Docs: https://coverage.readthedocs.io/ +.. _GitHub: https://github.com/nedbat/coveragepy + + +**New in 4.5:** Configurator plug-ins. + +New in 4.4: Suppressable warnings, continuous coverage measurement. + +New in 4.3: HTML ``--skip-covered``, sys.excepthook support, tox.ini +support. + +New in 4.2: better support for multiprocessing and combining data. + +New in 4.1: much-improved branch coverage. + +New in 4.0: ``--concurrency``, plugins for non-Python files, setup.cfg +support, --skip-covered, HTML filtering, and more than 50 issues closed. + + +Getting Started +--------------- + +See the `Quick Start section`_ of the docs. + +.. _Quick Start section: https://coverage.readthedocs.io/#quick-start + + +Contributing +------------ + +See the `Contributing section`_ of the docs. + +.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html + + +License +------- + +Licensed under the `Apache 2.0 License`_. 
For details, see `NOTICE.txt`_. + +.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 +.. _NOTICE.txt: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + + +.. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master + :target: https://travis-ci.org/nedbat/coveragepy + :alt: Build status +.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/kmeqpdje7h9r6vsf/branch/master?svg=true + :target: https://ci.appveyor.com/project/nedbat/coveragepy + :alt: Windows build status +.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat + :target: https://coverage.readthedocs.io/ + :alt: Documentation +.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master + :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master + :alt: Requirements status +.. |kit| image:: https://badge.fury.io/py/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: PyPI status +.. |format| image:: https://img.shields.io/pypi/format/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Kit format +.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Weekly PyPI downloads +.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Python versions supported +.. |status| image:: https://img.shields.io/pypi/status/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Package stability +.. |license| image:: https://img.shields.io/pypi/l/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: License +.. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2 + :target: http://codecov.io/github/nedbat/coveragepy?branch=master + :alt: Coverage! +.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg + :target: https://repology.org/metapackage/python:coverage/versions + :alt: Packaging status +.. |saythanks| image:: https://img.shields.io/badge/saythanks.io-%E2%98%BC-1EAEDB.svg + :target: https://saythanks.io/to/nedbat + :alt: Say thanks :) +.. |tidelift| image:: https://tidelift.com/badges/github/nedbat/coveragepy + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + :alt: Tidelift + + diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/INSTALLER b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/LICENSE.txt b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/LICENSE.txt new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/METADATA b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/METADATA new file mode 100644 index 0000000..7dc331c --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/METADATA @@ -0,0 +1,168 @@ +Metadata-Version: 2.0 +Name: coverage +Version: 4.5.3 +Summary: Code coverage measurement for Python +Home-page: https://github.com/nedbat/coveragepy +Author: Ned Batchelder and 100 others +Author-email: ned@nedbatchelder.com +License: Apache 2.0 +Keywords: code coverage testing +Platform: UNKNOWN +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: IronPython +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4 + +.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +=========== +Coverage.py +=========== + +Code coverage testing for Python. + +| |license| |versions| |status| |docs| +| |ci-status| |win-ci-status| |codecov| +| |kit| |format| |repos| +| |tidelift| |saythanks| + +.. downloads badge seems to be broken... |downloads| + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png + :width: 75 + :alt: Tidelift + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for coverage.py is available as part of the `Tidelift + Subscription`_. Tidelift gives software development teams a single source for + purchasing and maintaining their software, with professional grade assurances + from the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + +Coverage.py runs on many versions of Python: + +* CPython 2.6, 2.7 and 3.3 through alpha 3.8. +* PyPy2 6.0 and PyPy3 6.0. +* Jython 2.7.1, though not for reporting. +* IronPython 2.7.7, though not for reporting. + +Documentation is on `Read the Docs`_. Code repository and issue tracker are on +`GitHub`_. 
+ +.. _Read the Docs: https://coverage.readthedocs.io/ +.. _GitHub: https://github.com/nedbat/coveragepy + + +**New in 4.5:** Configurator plug-ins. + +New in 4.4: Suppressable warnings, continuous coverage measurement. + +New in 4.3: HTML ``--skip-covered``, sys.excepthook support, tox.ini +support. + +New in 4.2: better support for multiprocessing and combining data. + +New in 4.1: much-improved branch coverage. + +New in 4.0: ``--concurrency``, plugins for non-Python files, setup.cfg +support, --skip-covered, HTML filtering, and more than 50 issues closed. + + +Getting Started +--------------- + +See the `Quick Start section`_ of the docs. + +.. _Quick Start section: https://coverage.readthedocs.io/#quick-start + + +Contributing +------------ + +See the `Contributing section`_ of the docs. + +.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html + + +License +------- + +Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. + +.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 +.. _NOTICE.txt: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + + +.. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master + :target: https://travis-ci.org/nedbat/coveragepy + :alt: Build status +.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/kmeqpdje7h9r6vsf/branch/master?svg=true + :target: https://ci.appveyor.com/project/nedbat/coveragepy + :alt: Windows build status +.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat + :target: https://coverage.readthedocs.io/ + :alt: Documentation +.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master + :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master + :alt: Requirements status +.. |kit| image:: https://badge.fury.io/py/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: PyPI status +.. |format| image:: https://img.shields.io/pypi/format/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Kit format +.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Weekly PyPI downloads +.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Python versions supported +.. |status| image:: https://img.shields.io/pypi/status/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: Package stability +.. |license| image:: https://img.shields.io/pypi/l/coverage.svg + :target: https://pypi.python.org/pypi/coverage + :alt: License +.. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2 + :target: http://codecov.io/github/nedbat/coveragepy?branch=master + :alt: Coverage! +.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg + :target: https://repology.org/metapackage/python:coverage/versions + :alt: Packaging status +.. |saythanks| image:: https://img.shields.io/badge/saythanks.io-%E2%98%BC-1EAEDB.svg + :target: https://saythanks.io/to/nedbat + :alt: Say thanks :) +.. 
|tidelift| image:: https://tidelift.com/badges/github/nedbat/coveragepy + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + :alt: Tidelift + + diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/RECORD b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/RECORD new file mode 100644 index 0000000..711e90f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/RECORD @@ -0,0 +1,88 @@ +../../../bin/coverage,sha256=jrzIhj0N7U0BQ6miSWIJgRbK0iKNG-mJET3C2YHj1Ho,270 +../../../bin/coverage-2.7,sha256=jrzIhj0N7U0BQ6miSWIJgRbK0iKNG-mJET3C2YHj1Ho,270 +../../../bin/coverage2,sha256=jrzIhj0N7U0BQ6miSWIJgRbK0iKNG-mJET3C2YHj1Ho,270 +coverage-4.5.3.dist-info/DESCRIPTION.rst,sha256=o0YksXFFmfXRTEQ5qpIYWxjstjV93T8Koo-oXuWXGbE,5091 +coverage-4.5.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +coverage-4.5.3.dist-info/LICENSE.txt,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +coverage-4.5.3.dist-info/METADATA,sha256=_WnX4GpfTHrEmQ89E3Jo98ZAmFWWnrx3f6eMfbnHRX8,6623 +coverage-4.5.3.dist-info/RECORD,, +coverage-4.5.3.dist-info/WHEEL,sha256=pUUYF_efnmHed83CTn2kHWl-HTIiIju-ug6d79lJQzE,110 +coverage-4.5.3.dist-info/entry_points.txt,sha256=xRhYpOjq1-y_UKCF8RzFL7__AZVIyFfEZBEH1rovM54,123 +coverage-4.5.3.dist-info/metadata.json,sha256=lbfhJSYe0FWekfGeybDqZHGoi4ZDc6ThdWqtzqR70RE,1893 +coverage-4.5.3.dist-info/top_level.txt,sha256=BjhyiIvusb5OJkqCXjRncTF3soKF-mDOby-hxkWwwv0,9 +coverage/__init__.py,sha256=bnhf_jaujhqy5dwKjpHf-SMOJ5ZvBH6WFfJnyRlZyMU,1345 +coverage/__init__.pyc,, +coverage/__main__.py,sha256=wJB-zSAhlLmQk_q0uCK0H_FfdN6F0OeMCWV0FFzXl8c,257 +coverage/__main__.pyc,, +coverage/annotate.py,sha256=IrZWawG2gG4vHWW1mIxqYSA9ZvaSceNEvb3J-E38Qk0,3368 +coverage/annotate.pyc,, +coverage/backunittest.py,sha256=XVNUzPW0AtfqbtzVkFGSjPl9hfNp4slbXHghtPDBzQo,1515 +coverage/backunittest.pyc,, +coverage/backward.py,sha256=aW0iyulr8LHESLd0Nuq6UYxCUDyhRpjhmOG7IO_p4ME,5252 +coverage/backward.pyc,, +coverage/bytecode.py,sha256=GbkgjPPtfnlEyFed0oenjJQp9jHJCPGzQhrJqKhaZKI,723 +coverage/bytecode.pyc,, +coverage/cmdline.py,sha256=duKnvp7uCvvYxzwrq4HXaSHKk28aU0m1Kao8MqBzeK4,26017 +coverage/cmdline.pyc,, +coverage/collector.py,sha256=S24asnURZv-yQwNPr9vSr9vX4yyZPP3IWNdaQleyb-I,16389 +coverage/collector.pyc,, +coverage/config.py,sha256=cxWmauei-ES7hwgEBLiuh4iRSEOPlDfOdTxWVETowNo,17122 +coverage/config.pyc,, +coverage/control.py,sha256=kqWGT_-0UWSHWVuurNDjCvXyu6OiNbvDm8jT9Mi_T3Q,49213 +coverage/control.pyc,, +coverage/data.py,sha256=X1SrHJ0zEM7rsFjb-oRihbbfP_BbQDNpUQ9yavmDkuw,28824 +coverage/data.pyc,, +coverage/debug.py,sha256=mLJP6aexvvsQDNMq7nIzH9TYNBCERKodJd_serDsuFc,9871 +coverage/debug.pyc,, +coverage/env.py,sha256=Sp4tFzBjNJugwv2T2mpSxZS5kiTSiloodFebELWBBAw,1843 +coverage/env.pyc,, +coverage/execfile.py,sha256=zXTJOzeduWb5PK0gTetw1w2-dOqLVcKqW5sQmXy8rGw,10413 +coverage/execfile.pyc,, +coverage/files.py,sha256=BL0Ky_s6s_Uc1XQXXpUa3lovvU9sLKSeY-xDsIYyzUI,13957 +coverage/files.pyc,, +coverage/fullcoverage/encodings.py,sha256=mAsETebhlMwcySQbOzeuTGDPzVfrnJ4rvv0J4jkucKo,2548 +coverage/fullcoverage/encodings.pyc,, +coverage/html.py,sha256=FDUoOpd-fclw-99tRJl7ilEy59wMrouSksxm9FBZTTA,15233 +coverage/html.pyc,, +coverage/htmlfiles/coverage_html.js,sha256=m60KpPDw7SSBYP04pyRZv0MkTbSEadEQAcsOb5M79fI,18458 +coverage/htmlfiles/index.html,sha256=UTQJtDKR8H2t7Xw_aUyhAL20t0N8Nb8vnUHnHb7mIHw,4196 
+coverage/htmlfiles/jquery.ba-throttle-debounce.min.js,sha256=wXepUsOX1VYr5AyWGbIoAqlrvjQz9unPbtEyM7wFBnw,731 +coverage/htmlfiles/jquery.hotkeys.js,sha256=VVqbRGJlCvARPGMJXcSiRRIJwkpged3wG5fQ47gi42U,3065 +coverage/htmlfiles/jquery.isonscreen.js,sha256=WEyXE6yTYNzAYfzViwksNu7AJAY5zZBI9q6-J9pF1Zw,1502 +coverage/htmlfiles/jquery.min.js,sha256=JCYrqv7xcJKSfD2v52SqpSoqNxuD7SJJzKfkFN-Z-sE,95785 +coverage/htmlfiles/jquery.tablesorter.min.js,sha256=t4ifnz2eByQEUafncoSdJUwD2jUt68VY8CzNjAywo08,12795 +coverage/htmlfiles/keybd_closed.png,sha256=FNDmw-Lx9aptnMr-XiZ4gh2EGPs17nft-QKmXgilIe0,112 +coverage/htmlfiles/keybd_open.png,sha256=FNDmw-Lx9aptnMr-XiZ4gh2EGPs17nft-QKmXgilIe0,112 +coverage/htmlfiles/pyfile.html,sha256=q-BC_gq4WnS_0cB-j2b41K-MJsN3A-1wRDgdij9dhlk,3911 +coverage/htmlfiles/style.css,sha256=jATM7TxH5GdBaBsWKWb66IWtV8ZKoD-75Uwp4mEdPZQ,6757 +coverage/misc.py,sha256=pEbIcpYkB1AmbbB_EkAOxdkykMpGBWob-YBC8oHJb1Q,9039 +coverage/misc.pyc,, +coverage/multiproc.py,sha256=7XIahKlHWGEoiHPgAiiURxMgD4enus2JWD-rbYhxXcw,3608 +coverage/multiproc.pyc,, +coverage/parser.py,sha256=VD7TY0lpSbkCAjYMwQAGf6OkLPVlaJY3Q0nPu7SVYzc,48759 +coverage/parser.pyc,, +coverage/phystokens.py,sha256=7AacLOzrxXDRw3UsJ0a0KAsz3Zl3IGZV3Hns65fJtVM,10099 +coverage/phystokens.pyc,, +coverage/pickle2json.py,sha256=N1qoIc0AF1KDc5xY_e-bqFzZJXyFz-Ii-_H__QAaBYs,1489 +coverage/pickle2json.pyc,, +coverage/plugin.py,sha256=Uj0ldo4nJA3O4JIp9wIIzQUF9LR8YowhQLMByN0KqyA,16690 +coverage/plugin.pyc,, +coverage/plugin_support.py,sha256=tSmYHE_GsWnhyG1ZOHD9CjUjE4x2fkq2mncwGxiNirE,8213 +coverage/plugin_support.pyc,, +coverage/python.py,sha256=SEYJC7upIP4hgdDHtOwRVhOKs-g10ZCO76fyuclVyw8,7544 +coverage/python.pyc,, +coverage/pytracer.py,sha256=b22mUiU2UP-b-MsAurzhaXYZdNrRuqNd76JvMaPkD24,8888 +coverage/pytracer.pyc,, +coverage/report.py,sha256=O6vMkd6HYh8KXnY_TxpNhI9JHPLhZcatWu3ha3BtG9s,3636 +coverage/report.pyc,, +coverage/results.py,sha256=4Vk5gH9zbjbo6BDf5H3dJWHu-xravOb4P85p-CqH738,10222 +coverage/results.pyc,, +coverage/summary.py,sha256=xwxMJMVn0wqd_SpNNKmt0QtZKyT1cdlsl2UIzjviVP8,6535 +coverage/summary.pyc,, +coverage/templite.py,sha256=AiDaq-6L6G5vA1JowXpOx-98viMdWFAxC4nuYjZBag4,9982 +coverage/templite.pyc,, +coverage/tracer.so,sha256=6LiLliPFERXiJDGjrAyO9rJ8aY3tGb4ZZsQB_AyE7aM,51520 +coverage/version.py,sha256=zfodhWgmGvYaNlj-PZUDJhFfw5NhpTP4TP05sAQGBJI,1260 +coverage/version.pyc,, +coverage/xmlreport.py,sha256=Nuf5lVEvY7bNrATfdy97dWWfv7a1oawnBGZod8GtUyU,8980 +coverage/xmlreport.pyc,, diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/WHEEL b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/WHEEL new file mode 100644 index 0000000..ad51803 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: false +Tag: cp27-cp27m-macosx_10_13_intel + diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/entry_points.txt b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/entry_points.txt new file mode 100644 index 0000000..a487c42 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +coverage = coverage.cmdline:main +coverage-2.7 = coverage.cmdline:main +coverage2 = coverage.cmdline:main + diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/metadata.json b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/metadata.json new file mode 100644 index 0000000..e97362b --- 
/dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: IronPython", "Topic :: Software Development :: Quality Assurance", "Topic :: Software Development :: Testing", "Development Status :: 5 - Production/Stable"], "extensions": {"python.commands": {"wrap_console": {"coverage": "coverage.cmdline:main", "coverage-2.7": "coverage.cmdline:main", "coverage2": "coverage.cmdline:main"}}, "python.details": {"contacts": [{"email": "ned@nedbatchelder.com", "name": "Ned Batchelder and 100 others", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/nedbat/coveragepy"}}, "python.exports": {"console_scripts": {"coverage": "coverage.cmdline:main", "coverage-2.7": "coverage.cmdline:main", "coverage2": "coverage.cmdline:main"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["code", "coverage", "testing"], "license": "Apache 2.0", "metadata_version": "2.0", "name": "coverage", "requires_python": ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4", "summary": "Code coverage measurement for Python", "version": "4.5.3"} \ No newline at end of file diff --git a/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/top_level.txt b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/top_level.txt new file mode 100644 index 0000000..4ebc8ae --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage-4.5.3.dist-info/top_level.txt @@ -0,0 +1 @@ +coverage diff --git a/env_27/lib/python2.7/site-packages/coverage/__init__.py b/env_27/lib/python2.7/site-packages/coverage/__init__.py new file mode 100644 index 0000000..63f488f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/__init__.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Code coverage measurement for Python. + +Ned Batchelder +https://nedbatchelder.com/code/coverage + +""" + +from coverage.version import __version__, __url__, version_info + +from coverage.control import Coverage, process_startup +from coverage.data import CoverageData +from coverage.debug import enable_aspectlib_maybe +from coverage.misc import CoverageException +from coverage.plugin import CoveragePlugin, FileTracer, FileReporter +from coverage.pytracer import PyTracer + +# Backward compatibility. +coverage = Coverage + +# Possibly enable aspectlib to debug our execution. +enable_aspectlib_maybe() + +# On Windows, we encode and decode deep enough that something goes wrong and +# the encodings.utf_8 module is loaded and then unloaded, I don't know why. 
+# Adding a reference here prevents it from being unloaded. Yuk. +import encodings.utf_8 + +# Because of the "from coverage.control import fooey" lines at the top of the +# file, there's an entry for coverage.coverage in sys.modules, mapped to None. +# This makes some inspection tools (like pydoc) unable to find the class +# coverage.coverage. So remove that entry. +import sys +try: + del sys.modules['coverage.coverage'] +except KeyError: + pass diff --git a/env_27/lib/python2.7/site-packages/coverage/__main__.py b/env_27/lib/python2.7/site-packages/coverage/__main__.py new file mode 100644 index 0000000..35ab87a --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/__main__.py @@ -0,0 +1,8 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Coverage.py's main entry point.""" + +import sys +from coverage.cmdline import main +sys.exit(main()) diff --git a/env_27/lib/python2.7/site-packages/coverage/annotate.py b/env_27/lib/python2.7/site-packages/coverage/annotate.py new file mode 100644 index 0000000..4060450 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/annotate.py @@ -0,0 +1,103 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Source file annotation for coverage.py.""" + +import io +import os +import re + +from coverage.files import flat_rootname +from coverage.misc import isolate_module +from coverage.report import Reporter + +os = isolate_module(os) + + +class AnnotateReporter(Reporter): + """Generate annotated source files showing line coverage. + + This reporter creates annotated copies of the measured source files. Each + .py file is copied as a .py,cover file, with a left-hand margin annotating + each line:: + + > def h(x): + - if 0: #pragma: no cover + - pass + > if x == 1: + ! a = 1 + > else: + > a = 2 + + > h(2) + + Executed lines use '>', lines not executed use '!', lines excluded from + consideration use '-'. + + """ + + def __init__(self, coverage, config): + super(AnnotateReporter, self).__init__(coverage, config) + self.directory = None + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def report(self, morfs, directory=None): + """Run the report. + + See `coverage.report()` for arguments. + + """ + self.report_files(self.annotate_file, morfs, directory) + + def annotate_file(self, fr, analysis): + """Annotate a single file. + + `fr` is the FileReporter for the file to annotate. + + """ + statements = sorted(analysis.statements) + missing = sorted(analysis.missing) + excluded = sorted(analysis.excluded) + + if self.directory: + dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) + if dest_file.endswith("_py"): + dest_file = dest_file[:-3] + ".py" + dest_file += ",cover" + else: + dest_file = fr.filename + ",cover" + + with io.open(dest_file, 'w', encoding='utf8') as dest: + i = 0 + j = 0 + covered = True + source = fr.source() + for lineno, line in enumerate(source.splitlines(True), start=1): + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(u' ') + elif self.else_re.match(line): + # Special logic for lines containing only 'else:'. 
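+                    # (An "else:" line compiles to no bytecode of its own, so it
+                    # never appears in `statements`; its marker has to be inferred
+                    # from whether the statements around it ran or were missed.)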
+ if i >= len(statements) and j >= len(missing): + dest.write(u'! ') + elif i >= len(statements) or j >= len(missing): + dest.write(u'> ') + elif statements[i] == missing[j]: + dest.write(u'! ') + else: + dest.write(u'> ') + elif lineno in excluded: + dest.write(u'- ') + elif covered: + dest.write(u'> ') + else: + dest.write(u'! ') + + dest.write(line) diff --git a/env_27/lib/python2.7/site-packages/coverage/backunittest.py b/env_27/lib/python2.7/site-packages/coverage/backunittest.py new file mode 100644 index 0000000..09574cc --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/backunittest.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Implementations of unittest features from the future.""" + +# Use unittest2 if it's available, otherwise unittest. This gives us +# back-ported features for 2.6. +try: + import unittest2 as unittest +except ImportError: + import unittest + + +def unittest_has(method): + """Does `unittest.TestCase` have `method` defined?""" + return hasattr(unittest.TestCase, method) + + +class TestCase(unittest.TestCase): + """Just like unittest.TestCase, but with assert methods added. + + Designed to be compatible with 3.1 unittest. Methods are only defined if + `unittest` doesn't have them. + + """ + # pylint: disable=missing-docstring + + # Many Pythons have this method defined. But PyPy3 has a bug with it + # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our + # own implementation that works everywhere, at least for the ways we're + # calling it. + def assertCountEqual(self, s1, s2): + """Assert these have the same elements, regardless of order.""" + self.assertEqual(sorted(s1), sorted(s2)) + + if not unittest_has('assertRaisesRegex'): + def assertRaisesRegex(self, *args, **kwargs): + return self.assertRaisesRegexp(*args, **kwargs) + + if not unittest_has('assertRegex'): + def assertRegex(self, *args, **kwargs): + return self.assertRegexpMatches(*args, **kwargs) diff --git a/env_27/lib/python2.7/site-packages/coverage/backward.py b/env_27/lib/python2.7/site-packages/coverage/backward.py new file mode 100644 index 0000000..5aff640 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/backward.py @@ -0,0 +1,190 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Add things to old Pythons so I can pretend they are newer.""" + +# This file does tricky stuff, so disable a pylint warning. +# pylint: disable=unused-import + +import sys + +from coverage import env + + +# Pythons 2 and 3 differ on where to get StringIO. +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + +# In py3, ConfigParser was renamed to the more-standard configparser. +# But there's a py3 backport that installs "configparser" in py2, and I don't +# want it because it has annoying deprecation warnings. So try the real py2 +# import first. +try: + import ConfigParser as configparser +except ImportError: + import configparser + +# What's a string called? +try: + string_class = basestring +except NameError: + string_class = str + +# What's a Unicode string called? +try: + unicode_class = unicode +except NameError: + unicode_class = str + +# Where do pickles come from? +try: + import cPickle as pickle +except ImportError: + import pickle + +# range or xrange? 
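+# (On py2 this rebinds range to the lazy xrange; on py3 xrange is gone and
+# the NameError leaves the builtin range in place.)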
+try: + range = xrange # pylint: disable=redefined-builtin +except NameError: + range = range + +# shlex.quote is new, but there's an undocumented implementation in "pipes", +# who knew!? +try: + from shlex import quote as shlex_quote +except ImportError: + # Useful function, available under a different (undocumented) name + # in Python versions earlier than 3.3. + from pipes import quote as shlex_quote + +# A function to iterate listlessly over a dict's items, and one to get the +# items as a list. +try: + {}.iteritems +except AttributeError: + # Python 3 + def iitems(d): + """Produce the items from dict `d`.""" + return d.items() + + def litems(d): + """Return a list of items from dict `d`.""" + return list(d.items()) +else: + # Python 2 + def iitems(d): + """Produce the items from dict `d`.""" + return d.iteritems() + + def litems(d): + """Return a list of items from dict `d`.""" + return d.items() + +# Getting the `next` function from an iterator is different in 2 and 3. +try: + iter([]).next +except AttributeError: + def iternext(seq): + """Get the `next` function for iterating over `seq`.""" + return iter(seq).__next__ +else: + def iternext(seq): + """Get the `next` function for iterating over `seq`.""" + return iter(seq).next + +# Python 3.x is picky about bytes and strings, so provide methods to +# get them right, and make them no-ops in 2.x +if env.PY3: + def to_bytes(s): + """Convert string `s` to bytes.""" + return s.encode('utf8') + + def binary_bytes(byte_values): + """Produce a byte string with the ints from `byte_values`.""" + return bytes(byte_values) + + def bytes_to_ints(bytes_value): + """Turn a bytes object into a sequence of ints.""" + # In Python 3, iterating bytes gives ints. + return bytes_value + +else: + def to_bytes(s): + """Convert string `s` to bytes (no-op in 2.x).""" + return s + + def binary_bytes(byte_values): + """Produce a byte string with the ints from `byte_values`.""" + return "".join(chr(b) for b in byte_values) + + def bytes_to_ints(bytes_value): + """Turn a bytes object into a sequence of ints.""" + for byte in bytes_value: + yield ord(byte) + + +try: + # In Python 2.x, the builtins were in __builtin__ + BUILTINS = sys.modules['__builtin__'] +except KeyError: + # In Python 3.x, they're in builtins + BUILTINS = sys.modules['builtins'] + + +# imp was deprecated in Python 3.3 +try: + import importlib + import importlib.util + imp = None +except ImportError: + importlib = None + +# We only want to use importlib if it has everything we need. +try: + importlib_util_find_spec = importlib.util.find_spec +except Exception: + import imp + importlib_util_find_spec = None + +# What is the .pyc magic number for this version of Python? +try: + PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER +except AttributeError: + PYC_MAGIC_NUMBER = imp.get_magic() + + +def invalidate_import_caches(): + """Invalidate any import caches that may or may not exist.""" + if importlib and hasattr(importlib, "invalidate_caches"): + importlib.invalidate_caches() + + +def import_local_file(modname, modfile=None): + """Import a local file as a module. + + Opens a file in the current directory named `modname`.py, imports it + as `modname`, and returns the module object. `modfile` is the file to + import if it isn't in the current directory. 
+ + """ + try: + from importlib.machinery import SourceFileLoader + except ImportError: + SourceFileLoader = None + + if modfile is None: + modfile = modname + '.py' + if SourceFileLoader: + mod = SourceFileLoader(modname, modfile).load_module() + else: + for suff in imp.get_suffixes(): # pragma: part covered + if suff[0] == '.py': + break + + with open(modfile, 'r') as f: + # pylint: disable=undefined-loop-variable + mod = imp.load_module(modname, f, modfile, suff) + + return mod diff --git a/env_27/lib/python2.7/site-packages/coverage/bytecode.py b/env_27/lib/python2.7/site-packages/coverage/bytecode.py new file mode 100644 index 0000000..d823c67 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/bytecode.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Bytecode manipulation for coverage.py""" + +import types + + +class CodeObjects(object): + """Iterate over all the code objects in `code`.""" + def __init__(self, code): + self.stack = [code] + + def __iter__(self): + while self.stack: + # We're going to return the code object on the stack, but first + # push its children for later returning. + code = self.stack.pop() + for c in code.co_consts: + if isinstance(c, types.CodeType): + self.stack.append(c) + yield code diff --git a/env_27/lib/python2.7/site-packages/coverage/cmdline.py b/env_27/lib/python2.7/site-packages/coverage/cmdline.py new file mode 100644 index 0000000..79d0f26 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/cmdline.py @@ -0,0 +1,773 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Command-line support for coverage.py.""" + +from __future__ import print_function + +import glob +import optparse +import os.path +import sys +import textwrap +import traceback + +from coverage import env +from coverage.collector import CTracer +from coverage.debug import info_formatter, info_header +from coverage.execfile import run_python_file, run_python_module +from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource +from coverage.results import should_fail_under + + +class Opts(object): + """A namespace class for individual options we'll build parsers from.""" + + append = optparse.make_option( + '-a', '--append', action='store_true', + help="Append coverage data to .coverage, otherwise it starts clean each time.", + ) + branch = optparse.make_option( + '', '--branch', action='store_true', + help="Measure branch coverage in addition to statement coverage.", + ) + CONCURRENCY_CHOICES = [ + "thread", "gevent", "greenlet", "eventlet", "multiprocessing", + ] + concurrency = optparse.make_option( + '', '--concurrency', action='store', metavar="LIB", + choices=CONCURRENCY_CHOICES, + help=( + "Properly measure code using a concurrency library. " + "Valid values are: %s." 
+ ) % ", ".join(CONCURRENCY_CHOICES), + ) + debug = optparse.make_option( + '', '--debug', action='store', metavar="OPTS", + help="Debug options, separated by commas", + ) + directory = optparse.make_option( + '-d', '--directory', action='store', metavar="DIR", + help="Write the output files to DIR.", + ) + fail_under = optparse.make_option( + '', '--fail-under', action='store', metavar="MIN", type="float", + help="Exit with a status of 2 if the total coverage is less than MIN.", + ) + help = optparse.make_option( + '-h', '--help', action='store_true', + help="Get help on this command.", + ) + ignore_errors = optparse.make_option( + '-i', '--ignore-errors', action='store_true', + help="Ignore errors while reading source files.", + ) + include = optparse.make_option( + '', '--include', action='store', + metavar="PAT1,PAT2,...", + help=( + "Include only files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." + ), + ) + pylib = optparse.make_option( + '-L', '--pylib', action='store_true', + help=( + "Measure coverage even inside the Python installed library, " + "which isn't done by default." + ), + ) + show_missing = optparse.make_option( + '-m', '--show-missing', action='store_true', + help="Show line numbers of statements in each module that weren't executed.", + ) + skip_covered = optparse.make_option( + '--skip-covered', action='store_true', + help="Skip files with 100% coverage.", + ) + omit = optparse.make_option( + '', '--omit', action='store', + metavar="PAT1,PAT2,...", + help=( + "Omit files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." + ), + ) + output_xml = optparse.make_option( + '-o', '', action='store', dest="outfile", + metavar="OUTFILE", + help="Write the XML report to this file. Defaults to 'coverage.xml'", + ) + parallel_mode = optparse.make_option( + '-p', '--parallel-mode', action='store_true', + help=( + "Append the machine name, process id and random number to the " + ".coverage data file name to simplify collecting data from " + "many processes." + ), + ) + module = optparse.make_option( + '-m', '--module', action='store_true', + help=( + " is an importable Python module, not a script path, " + "to be run as 'python -m' would run it." + ), + ) + rcfile = optparse.make_option( + '', '--rcfile', action='store', + help=( + "Specify configuration file. " + "By default '.coveragerc', 'setup.cfg' and 'tox.ini' are tried." + ), + ) + source = optparse.make_option( + '', '--source', action='store', metavar="SRC1,SRC2,...", + help="A list of packages or directories of code to be measured.", + ) + timid = optparse.make_option( + '', '--timid', action='store_true', + help=( + "Use a simpler but slower trace method. Try this if you get " + "seemingly impossible results!" + ), + ) + title = optparse.make_option( + '', '--title', action='store', metavar="TITLE", + help="A text string to use as the title on the HTML.", + ) + version = optparse.make_option( + '', '--version', action='store_true', + help="Display version information and exit.", + ) + + +class CoverageOptionParser(optparse.OptionParser, object): + """Base OptionParser for coverage.py. + + Problems don't exit the program. + Defaults are initialized for all options. 
+ + """ + + def __init__(self, *args, **kwargs): + super(CoverageOptionParser, self).__init__( + add_help_option=False, *args, **kwargs + ) + self.set_defaults( + action=None, + append=None, + branch=None, + concurrency=None, + debug=None, + directory=None, + fail_under=None, + help=None, + ignore_errors=None, + include=None, + module=None, + omit=None, + parallel_mode=None, + pylib=None, + rcfile=True, + show_missing=None, + skip_covered=None, + source=None, + timid=None, + title=None, + version=None, + ) + + self.disable_interspersed_args() + self.help_fn = self.help_noop + + def help_noop(self, error=None, topic=None, parser=None): + """No-op help function.""" + pass + + class OptionParserError(Exception): + """Used to stop the optparse error handler ending the process.""" + pass + + def parse_args_ok(self, args=None, options=None): + """Call optparse.parse_args, but return a triple: + + (ok, options, args) + + """ + try: + options, args = \ + super(CoverageOptionParser, self).parse_args(args, options) + except self.OptionParserError: + return False, None, None + return True, options, args + + def error(self, msg): + """Override optparse.error so sys.exit doesn't get called.""" + self.help_fn(msg) + raise self.OptionParserError + + +class GlobalOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py global option arguments.""" + + def __init__(self): + super(GlobalOptionParser, self).__init__() + + self.add_options([ + Opts.help, + Opts.version, + ]) + + +class CmdOptionParser(CoverageOptionParser): + """Parse one of the new-style commands for coverage.py.""" + + def __init__(self, action, options, defaults=None, usage=None, description=None): + """Create an OptionParser for a coverage.py command. + + `action` is the slug to put into `options.action`. + `options` is a list of Option's for the command. + `defaults` is a dict of default value for options. + `usage` is the usage string to display in help. + `description` is the description of the command, for the help text. + + """ + if usage: + usage = "%prog " + usage + super(CmdOptionParser, self).__init__( + usage=usage, + description=description, + ) + self.set_defaults(action=action, **(defaults or {})) + self.add_options(options) + self.cmd = action + + def __eq__(self, other): + # A convenience equality, so that I can put strings in unit test + # results, and they will compare equal to objects. + return (other == "" % self.cmd) + + __hash__ = None # This object doesn't need to be hashed. + + def get_prog_name(self): + """Override of an undocumented function in optparse.OptionParser.""" + program_name = super(CmdOptionParser, self).get_prog_name() + + # Include the sub-command for this parser as part of the command. + return "{command} {subcommand}".format(command=program_name, subcommand=self.cmd) + + +GLOBAL_ARGS = [ + Opts.debug, + Opts.help, + Opts.rcfile, + ] + +CMDS = { + 'annotate': CmdOptionParser( + "annotate", + [ + Opts.directory, + Opts.ignore_errors, + Opts.include, + Opts.omit, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Make annotated copies of the given files, marking statements that are executed " + "with > and statements that are missed with !." + ), + ), + + 'combine': CmdOptionParser( + "combine", + [ + Opts.append, + ] + GLOBAL_ARGS, + usage="[options] ... ", + description=( + "Combine data from multiple coverage files collected " + "with 'run -p'. The combined results are written to a single " + "file representing the union of the data. 
The positional " + "arguments are data files or directories containing data files. " + "If no paths are provided, data files in the default data file's " + "directory are combined." + ), + ), + + 'debug': CmdOptionParser( + "debug", GLOBAL_ARGS, + usage="", + description=( + "Display information on the internals of coverage.py, " + "for diagnosing problems. " + "Topics are 'data' to show a summary of the collected data, " + "or 'sys' to show installation information." + ), + ), + + 'erase': CmdOptionParser( + "erase", GLOBAL_ARGS, + description="Erase previously collected coverage data.", + ), + + 'help': CmdOptionParser( + "help", GLOBAL_ARGS, + usage="[command]", + description="Describe how to use coverage.py", + ), + + 'html': CmdOptionParser( + "html", + [ + Opts.directory, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.title, + Opts.skip_covered, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Create an HTML report of the coverage of the files. " + "Each file gets its own page, with the source decorated to show " + "executed, excluded, and missed lines." + ), + ), + + 'report': CmdOptionParser( + "report", + [ + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.show_missing, + Opts.skip_covered, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Report coverage statistics on modules." + ), + + 'run': CmdOptionParser( + "run", + [ + Opts.append, + Opts.branch, + Opts.concurrency, + Opts.include, + Opts.module, + Opts.omit, + Opts.pylib, + Opts.parallel_mode, + Opts.source, + Opts.timid, + ] + GLOBAL_ARGS, + usage="[options] [program options]", + description="Run a Python program, measuring code execution." + ), + + 'xml': CmdOptionParser( + "xml", + [ + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_xml, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an XML report of coverage results." + ), +} + + +OK, ERR, FAIL_UNDER = 0, 1, 2 + + +class CoverageScript(object): + """The command-line interface to coverage.py.""" + + def __init__(self, _covpkg=None, _run_python_file=None, + _run_python_module=None, _help_fn=None, _path_exists=None): + # _covpkg is for dependency injection, so we can test this code. + if _covpkg: + self.covpkg = _covpkg + else: + import coverage + self.covpkg = coverage + + # For dependency injection: + self.run_python_file = _run_python_file or run_python_file + self.run_python_module = _run_python_module or run_python_module + self.help_fn = _help_fn or self.help + self.path_exists = _path_exists or os.path.exists + self.global_option = False + + self.coverage = None + + program_path = sys.argv[0] + if program_path.endswith(os.path.sep + '__main__.py'): + # The path is the main module of a package; get that path instead. + program_path = os.path.dirname(program_path) + self.program_name = os.path.basename(program_path) + if env.WINDOWS: + # entry_points={'console_scripts':...} on Windows makes files + # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These + # invoke coverage-script.py, coverage3-script.py, and + # coverage-3.5-script.py. argv[0] is the .py file, but we want to + # get back to the original form. + auto_suffix = "-script.py" + if self.program_name.endswith(auto_suffix): + self.program_name = self.program_name[:-len(auto_suffix)] + + def command_line(self, argv): + """The bulk of the command line interface to coverage.py. + + `argv` is the argument list to process. 
+ + Returns 0 if all is well, 1 if something went wrong. + + """ + # Collect the command-line options. + if not argv: + self.help_fn(topic='minimum_help') + return OK + + # The command syntax we parse depends on the first argument. Global + # switch syntax always starts with an option. + self.global_option = argv[0].startswith('-') + if self.global_option: + parser = GlobalOptionParser() + else: + parser = CMDS.get(argv[0]) + if not parser: + self.help_fn("Unknown command: '%s'" % argv[0]) + return ERR + argv = argv[1:] + + parser.help_fn = self.help_fn + ok, options, args = parser.parse_args_ok(argv) + if not ok: + return ERR + + # Handle help and version. + if self.do_help(options, args, parser): + return OK + + # We need to be able to import from the current directory, because + # plugins may try to, for example, to read Django settings. + sys.path[0] = '' + + # Listify the list options. + source = unshell_list(options.source) + omit = unshell_list(options.omit) + include = unshell_list(options.include) + debug = unshell_list(options.debug) + + # Do something. + self.coverage = self.covpkg.Coverage( + data_suffix=options.parallel_mode, + cover_pylib=options.pylib, + timid=options.timid, + branch=options.branch, + config_file=options.rcfile, + source=source, + omit=omit, + include=include, + debug=debug, + concurrency=options.concurrency, + ) + + if options.action == "debug": + return self.do_debug(args) + + elif options.action == "erase": + self.coverage.erase() + return OK + + elif options.action == "run": + return self.do_run(options, args) + + elif options.action == "combine": + if options.append: + self.coverage.load() + data_dirs = args or None + self.coverage.combine(data_dirs, strict=True) + self.coverage.save() + return OK + + # Remaining actions are reporting, with some common options. + report_args = dict( + morfs=unglob_args(args), + ignore_errors=options.ignore_errors, + omit=omit, + include=include, + ) + + self.coverage.load() + + total = None + if options.action == "report": + total = self.coverage.report( + show_missing=options.show_missing, + skip_covered=options.skip_covered, **report_args) + elif options.action == "annotate": + self.coverage.annotate( + directory=options.directory, **report_args) + elif options.action == "html": + total = self.coverage.html_report( + directory=options.directory, title=options.title, + skip_covered=options.skip_covered, **report_args) + elif options.action == "xml": + outfile = options.outfile + total = self.coverage.xml_report(outfile=outfile, **report_args) + + if total is not None: + # Apply the command line fail-under options, and then use the config + # value, so we can get fail_under from the config file. + if options.fail_under is not None: + self.coverage.set_option("report:fail_under", options.fail_under) + + fail_under = self.coverage.get_option("report:fail_under") + precision = self.coverage.get_option("report:precision") + if should_fail_under(total, fail_under, precision): + return FAIL_UNDER + + return OK + + def help(self, error=None, topic=None, parser=None): + """Display an error message, or the named topic.""" + assert error or topic or parser + if error: + print(error, file=sys.stderr) + print("Use '%s help' for help." 
% (self.program_name,), file=sys.stderr) + elif parser: + print(parser.format_help().strip()) + else: + help_params = dict(self.covpkg.__dict__) + help_params['program_name'] = self.program_name + if CTracer is not None: + help_params['extension_modifier'] = 'with C extension' + else: + help_params['extension_modifier'] = 'without C extension' + help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip() + if help_msg: + print(help_msg.format(**help_params)) + else: + print("Don't know topic %r" % topic) + + def do_help(self, options, args, parser): + """Deal with help requests. + + Return True if it handled the request, False if not. + + """ + # Handle help. + if options.help: + if self.global_option: + self.help_fn(topic='help') + else: + self.help_fn(parser=parser) + return True + + if options.action == "help": + if args: + for a in args: + parser = CMDS.get(a) + if parser: + self.help_fn(parser=parser) + else: + self.help_fn(topic=a) + else: + self.help_fn(topic='help') + return True + + # Handle version. + if options.version: + self.help_fn(topic='version') + return True + + return False + + def do_run(self, options, args): + """Implementation of 'coverage run'.""" + + if not args: + self.help_fn("Nothing to do.") + return ERR + + if options.append and self.coverage.get_option("run:parallel"): + self.help_fn("Can't append to data files in parallel mode.") + return ERR + + if options.concurrency == "multiprocessing": + # Can't set other run-affecting command line options with + # multiprocessing. + for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']: + # As it happens, all of these options have no default, meaning + # they will be None if they have not been specified. + if getattr(options, opt_name) is not None: + self.help_fn( + "Options affecting multiprocessing must be specified " + "in a configuration file." + ) + return ERR + + if not self.coverage.get_option("run:parallel"): + if not options.append: + self.coverage.erase() + + # Run the script. 
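+        # (The tracer is installed before the user's code runs and removed in
+        # the `finally` below, so data is kept even when the program raises.
+        # With --append, the fresh data is then merged into the existing
+        # data file.)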
+ self.coverage.start() + code_ran = True + try: + if options.module: + self.run_python_module(args[0], args) + else: + filename = args[0] + self.run_python_file(filename, args) + except NoSource: + code_ran = False + raise + finally: + self.coverage.stop() + if code_ran: + if options.append: + data_file = self.coverage.get_option("run:data_file") + if self.path_exists(data_file): + self.coverage.combine(data_paths=[data_file]) + self.coverage.save() + + return OK + + def do_debug(self, args): + """Implementation of 'coverage debug'.""" + + if not args: + self.help_fn("What information would you like: config, data, sys?") + return ERR + + for info in args: + if info == 'sys': + sys_info = self.coverage.sys_info() + print(info_header("sys")) + for line in info_formatter(sys_info): + print(" %s" % line) + elif info == 'data': + self.coverage.load() + data = self.coverage.data + print(info_header("data")) + print("path: %s" % self.coverage.data_files.filename) + if data: + print("has_arcs: %r" % data.has_arcs()) + summary = data.line_counts(fullpath=True) + filenames = sorted(summary.keys()) + print("\n%d files:" % len(filenames)) + for f in filenames: + line = "%s: %d lines" % (f, summary[f]) + plugin = data.file_tracer(f) + if plugin: + line += " [%s]" % plugin + print(line) + else: + print("No data collected") + elif info == 'config': + print(info_header("config")) + config_info = self.coverage.config.__dict__.items() + for line in info_formatter(config_info): + print(" %s" % line) + else: + self.help_fn("Don't know what you mean by %r" % info) + return ERR + + return OK + + +def unshell_list(s): + """Turn a command-line argument into a list.""" + if not s: + return None + if env.WINDOWS: + # When running coverage.py as coverage.exe, some of the behavior + # of the shell is emulated: wildcards are expanded into a list of + # file names. So you have to single-quote patterns on the command + # line, but (not) helpfully, the single quotes are included in the + # argument, so we have to strip them off here. + s = s.strip("'") + return s.split(',') + + +def unglob_args(args): + """Interpret shell wildcards for platforms that need it.""" + if env.WINDOWS: + globbed = [] + for arg in args: + if '?' in arg or '*' in arg: + globbed.extend(glob.glob(arg)) + else: + globbed.append(arg) + args = globbed + return args + + +HELP_TOPICS = { + 'help': """\ + Coverage.py, version {__version__} {extension_modifier} + Measure, collect, and report on code coverage in Python programs. + + usage: {program_name} <command> [options] [args] + + Commands: + annotate Annotate source files with execution information. + combine Combine a number of data files. + erase Erase previously collected coverage data. + help Get help on using coverage.py. + html Create an HTML report. + report Report coverage stats on modules. + run Run a Python program and measure code execution. + xml Create an XML report of coverage results. + + Use "{program_name} help <command>" for detailed help on any command. + For full documentation, see {__url__} + """, + + 'minimum_help': """\ + Code coverage for Python. Use '{program_name} help' for help. + """, + + 'version': """\ + Coverage.py, version {__version__} {extension_modifier} + Documentation at {__url__} + """, +} + + +def main(argv=None): + """The main entry point to coverage.py. + + This is installed as the script entry point.
+ + """ + if argv is None: + argv = sys.argv[1:] + try: + status = CoverageScript().command_line(argv) + except ExceptionDuringRun as err: + # An exception was caught while running the product code. The + # sys.exc_info() return tuple is packed into an ExceptionDuringRun + # exception. + traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter + status = ERR + except BaseCoverageException as err: + # A controlled error inside coverage.py: print the message to the user. + print(err) + status = ERR + except SystemExit as err: + # The user called `sys.exit()`. Exit with their argument, if any. + if err.args: + status = err.args[0] + else: + status = None + return status diff --git a/env_27/lib/python2.7/site-packages/coverage/collector.py b/env_27/lib/python2.7/site-packages/coverage/collector.py new file mode 100644 index 0000000..72ab32b --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/collector.py @@ -0,0 +1,417 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +import os +import sys + +from coverage import env +from coverage.backward import litems, range # pylint: disable=redefined-builtin +from coverage.debug import short_stack +from coverage.files import abs_file +from coverage.misc import CoverageException, isolate_module +from coverage.pytracer import PyTracer + +os = isolate_module(os) + + +try: + # Use the C extension code when we can, for speed. + from coverage.tracer import CTracer, CFileDisposition +except ImportError: + # Couldn't import the C extension, maybe it isn't built. + if os.getenv('COVERAGE_TEST_TRACER') == 'c': + # During testing, we use the COVERAGE_TEST_TRACER environment variable + # to indicate that we've fiddled with the environment to test this + # fallback code. If we thought we had a C tracer, but couldn't import + # it, then exit quickly and clearly instead of dribbling confusing + # errors. I'm using sys.exit here instead of an exception because an + # exception here causes all sorts of other noise in unittest. + sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") + sys.exit(1) + CTracer = None + + +class FileDisposition(object): + """A simple value type for recording what to do with a file.""" + pass + + +def should_start_context(frame): + """Who-Tests-What hack: Determine whether this frame begins a new who-context.""" + fn_name = frame.f_code.co_name + if fn_name.startswith("test"): + return fn_name + return None + + +class Collector(object): + """Collects trace data. + + Creates a Tracer object for each thread, since they track stack + information. Each Tracer points to the same shared data, contributing + traced data points. + + When the Collector is started, it creates a Tracer for the current thread, + and installs a function to create Tracers for each new thread started. + When the Collector is stopped, all active Tracers are stopped. + + Threads started while the Collector is stopped will never have Tracers + associated with them. + + """ + + # The stack of active Collectors. Collectors are added here when started, + # and popped when stopped. Collectors on the stack are paused when not + # the top, and resumed when they become the top again. + _collectors = [] + + # The concurrency settings we support here. 
+ SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"]) + + def __init__(self, should_trace, check_include, timid, branch, warn, concurrency): + """Create a collector. + + `should_trace` is a function, taking a file name and a frame, and + returning a `coverage.FileDisposition object`. + + `check_include` is a function taking a file name and a frame. It returns + a boolean: True if the file should be traced, False if not. + + If `timid` is true, then a slower simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions make the faster more sophisticated trace function not + operate properly. + + If `branch` is true, then branches will be measured. This involves + collecting data on which statements followed each other (arcs). Use + `get_arc_data` to get the arc data. + + `warn` is a warning function, taking a single string message argument + and an optional slug argument which will be a string or None, to be + used if a warning needs to be issued. + + `concurrency` is a list of strings indicating the concurrency libraries + in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" + (the default). Of these four values, only one can be supplied. Other + values are ignored. + + """ + self.should_trace = should_trace + self.check_include = check_include + self.warn = warn + self.branch = branch + self.threading = None + + self.origin = short_stack() + + self.concur_id_func = None + + # We can handle a few concurrency options here, but only one at a time. + these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency) + if len(these_concurrencies) > 1: + raise CoverageException("Conflicting concurrency settings: %s" % concurrency) + self.concurrency = these_concurrencies.pop() if these_concurrencies else '' + + try: + if self.concurrency == "greenlet": + import greenlet + self.concur_id_func = greenlet.getcurrent + elif self.concurrency == "eventlet": + import eventlet.greenthread # pylint: disable=import-error,useless-suppression + self.concur_id_func = eventlet.greenthread.getcurrent + elif self.concurrency == "gevent": + import gevent # pylint: disable=import-error,useless-suppression + self.concur_id_func = gevent.getcurrent + elif self.concurrency == "thread" or not self.concurrency: + # It's important to import threading only if we need it. If + # it's imported early, and the program being measured uses + # gevent, then gevent's monkey-patching won't work properly. + import threading + self.threading = threading + else: + raise CoverageException("Don't understand concurrency=%s" % concurrency) + except ImportError: + raise CoverageException( + "Couldn't trace with concurrency=%s, the module isn't installed." % ( + self.concurrency, + ) + ) + + # Who-Tests-What is just a hack at the moment, so turn it on with an + # environment variable. + self.wtw = int(os.getenv('COVERAGE_WTW', 0)) + + self.reset() + + if timid: + # Being timid: use the simple Python trace function. + self._trace_class = PyTracer + else: + # Being fast: use the C Tracer if it is available, else the Python + # trace function. 
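+            # (CTracer is None when the C extension failed to import, so this
+            # `or` silently falls back to the pure-Python tracer.)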
+ self._trace_class = CTracer or PyTracer + + if self._trace_class is CTracer: + self.file_disposition_class = CFileDisposition + self.supports_plugins = True + else: + self.file_disposition_class = FileDisposition + self.supports_plugins = False + + def __repr__(self): + return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name()) + + def tracer_name(self): + """Return the class name of the tracer we're using.""" + return self._trace_class.__name__ + + def _clear_data(self): + """Clear out existing data, but stay ready for more collection.""" + self.data.clear() + + for tracer in self.tracers: + tracer.reset_activity() + + def reset(self): + """Clear collected data, and prepare to collect more.""" + # A dictionary mapping file names to dicts with line number keys (if not + # branch coverage), or mapping file names to dicts with line number + # pairs as keys (if branch coverage). + self.data = {} + + # A dict mapping contexts to data dictionaries. + self.contexts = {} + self.contexts[None] = self.data + + # A dictionary mapping file names to file tracer plugin names that will + # handle them. + self.file_tracers = {} + + # The .should_trace_cache attribute is a cache from file names to + # coverage.FileDisposition objects, or None. When a file is first + # considered for tracing, a FileDisposition is obtained from + # Coverage.should_trace. Its .trace attribute indicates whether the + # file should be traced or not. If it should be, a plugin with dynamic + # file names can decide not to trace it based on the dynamic file name + # being excluded by the inclusion rules, in which case the + # FileDisposition will be replaced by None in the cache. + if env.PYPY: + import __pypy__ # pylint: disable=import-error + # Alex Gaynor said: + # should_trace_cache is a strictly growing key: once a key is in + # it, it never changes. Further, the keys used to access it are + # generally constant, given sufficient context. That is to say, at + # any given point _trace() is called, pypy is able to know the key. + # This is because the key is determined by the physical source code + # line, and that's invariant with the call site. + # + # This property of a dict with immutable keys, combined with + # call-site-constant keys is a match for PyPy's module dict, + # which is optimized for such workloads. + # + # This gives a 20% benefit on the workload described at + # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage + self.should_trace_cache = __pypy__.newdict("module") + else: + self.should_trace_cache = {} + + # Our active Tracers.
+ self.tracers = [] + + self._clear_data() + + def _start_tracer(self): + """Start a new Tracer object, and store it in self.tracers.""" + tracer = self._trace_class() + tracer.data = self.data + tracer.trace_arcs = self.branch + tracer.should_trace = self.should_trace + tracer.should_trace_cache = self.should_trace_cache + tracer.warn = self.warn + + if hasattr(tracer, 'concur_id_func'): + tracer.concur_id_func = self.concur_id_func + elif self.concur_id_func: + raise CoverageException( + "Can't support concurrency=%s with %s, only threads are supported" % ( + self.concurrency, self.tracer_name(), + ) + ) + + if hasattr(tracer, 'file_tracers'): + tracer.file_tracers = self.file_tracers + if hasattr(tracer, 'threading'): + tracer.threading = self.threading + if hasattr(tracer, 'check_include'): + tracer.check_include = self.check_include + if self.wtw: + if hasattr(tracer, 'should_start_context'): + tracer.should_start_context = should_start_context + if hasattr(tracer, 'switch_context'): + tracer.switch_context = self.switch_context + + fn = tracer.start() + self.tracers.append(tracer) + + return fn + + # The trace function has to be set individually on each thread before + # execution begins. Ironically, the only support the threading module has + # for running code before the thread main is the tracing function. So we + # install this as a trace function, and the first time it's called, it does + # the real trace installation. + + def _installation_trace(self, frame, event, arg): + """Called on new threads, installs the real tracer.""" + # Remove ourselves as the trace function. + sys.settrace(None) + # Install the real tracer. + fn = self._start_tracer() + # Invoke the real trace function with the current event, to be sure + # not to lose an event. + if fn: + fn = fn(frame, event, arg) + # Return the new trace function to continue tracing in this scope. + return fn + + def start(self): + """Start collecting trace information.""" + if self._collectors: + self._collectors[-1].pause() + + self.tracers = [] + + # Check to see whether we had a fullcoverage tracer installed. If so, + # get the stack frames it stashed away for us. + traces0 = [] + fn0 = sys.gettrace() + if fn0: + tracer0 = getattr(fn0, '__self__', None) + if tracer0: + traces0 = getattr(tracer0, 'traces', []) + + try: + # Install the tracer on this thread. + fn = self._start_tracer() + except: + if self._collectors: + self._collectors[-1].resume() + raise + + # If _start_tracer succeeded, then we add ourselves to the global + # stack of collectors. + self._collectors.append(self) + + # Replay all the events from fullcoverage into the new trace function. + for args in traces0: + (frame, event, arg), lineno = args + try: + fn(frame, event, arg, lineno=lineno) + except TypeError: + raise Exception("fullcoverage must be run with the C trace function.") + + # Install our installation tracer in threading, to jump-start other + # threads. + if self.threading: + self.threading.settrace(self._installation_trace) + + def stop(self): + """Stop collecting trace information.""" + assert self._collectors + if self._collectors[-1] is not self: + print("self._collectors:") + for c in self._collectors: + print(" {!r}\n{}".format(c, c.origin)) + assert self._collectors[-1] is self, ( + "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1]) + ) + + self.pause() + + # Remove this Collector from the stack, and resume the one underneath + # (if any). 
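+        # (pause() above has already stopped our tracers; popping restores the
+        # enclosing collector, which is what makes nested measurement work.)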
+ self._collectors.pop() + if self._collectors: + self._collectors[-1].resume() + + def pause(self): + """Pause tracing, but be prepared to `resume`.""" + for tracer in self.tracers: + tracer.stop() + stats = tracer.get_stats() + if stats: + print("\nCoverage.py tracer stats:") + for k in sorted(stats.keys()): + print("%20s: %s" % (k, stats[k])) + if self.threading: + self.threading.settrace(None) + + def resume(self): + """Resume tracing after a `pause`.""" + for tracer in self.tracers: + tracer.start() + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() + + def _activity(self): + """Has any activity been traced? + + Returns a boolean, True if any trace function was invoked. + + """ + return any(tracer.activity() for tracer in self.tracers) + + def switch_context(self, new_context): + """Who-Tests-What hack: switch to a new who-context.""" + # Make a new data dict, or find the existing one, and switch all the + # tracers to use it. + data = self.contexts.setdefault(new_context, {}) + for tracer in self.tracers: + tracer.data = data + + def save_data(self, covdata): + """Save the collected data to a `CoverageData`. + + Returns True if there was data to save, False if not. + """ + if not self._activity(): + return False + + def abs_file_dict(d): + """Return a dict like d, but with keys modified by `abs_file`.""" + # The call to litems() ensures that the GIL protects the dictionary + # iterator against concurrent modifications by tracers running + # in other threads. We try three times in case of concurrent + # access, hoping to get a clean copy. + runtime_err = None + for _ in range(3): + try: + items = litems(d) + except RuntimeError as ex: + runtime_err = ex + else: + break + else: + raise runtime_err # pylint: disable=raising-bad-type + + return dict((abs_file(k), v) for k, v in items) + + if self.branch: + covdata.add_arcs(abs_file_dict(self.data)) + else: + covdata.add_lines(abs_file_dict(self.data)) + covdata.add_file_tracers(abs_file_dict(self.file_tracers)) + + if self.wtw: + # Just a hack, so just hack it. + import pprint + out_file = "coverage_wtw_{:06}.py".format(os.getpid()) + with open(out_file, "w") as wtw_out: + pprint.pprint(self.contexts, wtw_out) + + self._clear_data() + return True diff --git a/env_27/lib/python2.7/site-packages/coverage/config.py b/env_27/lib/python2.7/site-packages/coverage/config.py new file mode 100644 index 0000000..7b8f2bd --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/config.py @@ -0,0 +1,489 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Config file for coverage.py""" + +import collections +import os +import re +import sys + +from coverage.backward import configparser, iitems, string_class +from coverage.misc import contract, CoverageException, isolate_module + +os = isolate_module(os) + + +class HandyConfigParser(configparser.RawConfigParser): + """Our specialization of ConfigParser.""" + + def __init__(self, our_file): + """Create the HandyConfigParser. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. 
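+
+        For example, when reading tox.ini (``our_file=False``), only sections
+        spelled with the ``coverage:`` prefix, such as ``[coverage:run]``, are
+        consulted; a plain ``[run]`` section is ignored.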
+ """ + + configparser.RawConfigParser.__init__(self) + self.section_prefixes = ["coverage:"] + if our_file: + self.section_prefixes.append("") + + def read(self, filenames): + """Read a file name as UTF-8 configuration data.""" + kwargs = {} + if sys.version_info >= (3, 2): + kwargs['encoding'] = "utf-8" + return configparser.RawConfigParser.read(self, filenames, **kwargs) + + def has_option(self, section, option): + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + has = configparser.RawConfigParser.has_option(self, real_section, option) + if has: + return has + return False + + def has_section(self, section): + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + has = configparser.RawConfigParser.has_section(self, real_section) + if has: + return real_section + return False + + def options(self, section): + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + if configparser.RawConfigParser.has_section(self, real_section): + return configparser.RawConfigParser.options(self, real_section) + raise configparser.NoSectionError + + def get_section(self, section): + """Get the contents of a section, as a dictionary.""" + d = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + + def get(self, section, option, *args, **kwargs): # pylint: disable=arguments-differ + """Get a value, replacing environment variables also. + + The arguments are the same as `RawConfigParser.get`, but in the found + value, ``$WORD`` or ``${WORD}`` are replaced by the value of the + environment variable ``WORD``. + + Returns the finished value. + + """ + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + if configparser.RawConfigParser.has_option(self, real_section, option): + break + else: + raise configparser.NoOptionError + + v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs) + def dollar_replace(m): + """Called for each $replacement.""" + # Only one of the groups will have matched, just get its text. + word = next(w for w in m.groups() if w is not None) # pragma: part covered + if word == "$": + return "$" + else: + return os.environ.get(word, '') + + dollar_pattern = r"""(?x) # Use extended regex syntax + \$(?: # A dollar sign, then + (?P\w+) | # a plain word, + {(?P\w+)} | # or a {-wrapped word, + (?P[$]) # or a dollar sign. + ) + """ + v = re.sub(dollar_pattern, dollar_replace, v) + return v + + def getlist(self, section, option): + """Read a list of strings. + + The value of `section` and `option` is treated as a comma- and newline- + separated list of strings. Each value is stripped of whitespace. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + values = [] + for value_line in value_list.split('\n'): + for value in value_line.split(','): + value = value.strip() + if value: + values.append(value) + return values + + def getregexlist(self, section, option): + """Read a list of full-line regexes. + + The value of `section` and `option` is treated as a newline-separated + list of regexes. Each value is stripped of whitespace. + + Returns the list of strings. 
+ + """ + line_list = self.get(section, option) + value_list = [] + for value in line_list.splitlines(): + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise CoverageException( + "Invalid [%s].%s value %r: %s" % (section, option, value, e) + ) + if value: + value_list.append(value) + return value_list + + +# The default line exclusion regexes. +DEFAULT_EXCLUDE = [ + r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)', +] + +# The default partial branch regexes, to be modified by the user. +DEFAULT_PARTIAL = [ + r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)', +] + +# The default partial branch regexes, based on Python semantics. +# These are any Python branching constructs that can't actually execute all +# their branches. +DEFAULT_PARTIAL_ALWAYS = [ + 'while (True|1|False|0):', + 'if (True|1|False|0):', +] + + +class CoverageConfig(object): + """Coverage.py configuration. + + The attributes of this class are the various settings that control the + operation of coverage.py. + + """ + def __init__(self): + """Initialize the configuration attributes to their defaults.""" + # Metadata about the config. + self.attempted_config_files = [] + self.config_files = [] + + # Defaults for [run] and [report] + self._include = None + self._omit = None + + # Defaults for [run] + self.branch = False + self.concurrency = None + self.cover_pylib = False + self.data_file = ".coverage" + self.debug = [] + self.disable_warnings = [] + self.note = None + self.parallel = False + self.plugins = [] + self.source = None + self.run_include = None + self.run_omit = None + self.timid = False + + # Defaults for [report] + self.exclude_list = DEFAULT_EXCLUDE[:] + self.fail_under = 0.0 + self.ignore_errors = False + self.report_include = None + self.report_omit = None + self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.partial_list = DEFAULT_PARTIAL[:] + self.precision = 0 + self.show_missing = False + self.skip_covered = False + + # Defaults for [html] + self.extra_css = None + self.html_dir = "htmlcov" + self.html_title = "Coverage report" + + # Defaults for [xml] + self.xml_output = "coverage.xml" + self.xml_package_depth = 99 + + # Defaults for [paths] + self.paths = {} + + # Options for plugins + self.plugin_options = {} + + MUST_BE_LIST = [ + "debug", "concurrency", "plugins", + "report_omit", "report_include", + "run_omit", "run_include", + ] + + def from_args(self, **kwargs): + """Read config values from `kwargs`.""" + for k, v in iitems(kwargs): + if v is not None: + if k in self.MUST_BE_LIST and isinstance(v, string_class): + v = [v] + setattr(self, k, v) + + @contract(filename=str) + def from_file(self, filename, our_file): + """Read configuration from a .rc file. + + `filename` is a file name to read. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. + + Returns True or False, whether the file could be read, and it had some + coverage.py settings in it. 
+ + """ + self.attempted_config_files.append(filename) + + cp = HandyConfigParser(our_file) + try: + files_read = cp.read(filename) + except configparser.Error as err: + raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) + if not files_read: + return False + + self.config_files.extend(files_read) + + any_set = False + try: + for option_spec in self.CONFIG_FILE_OPTIONS: + was_set = self._set_attr_from_config_option(cp, *option_spec) + if was_set: + any_set = True + except ValueError as err: + raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) + + # Check that there are no unrecognized options. + all_options = collections.defaultdict(set) + for option_spec in self.CONFIG_FILE_OPTIONS: + section, option = option_spec[1].split(":") + all_options[section].add(option) + + for section, options in iitems(all_options): + real_section = cp.has_section(section) + if real_section: + for unknown in set(cp.options(section)) - options: + raise CoverageException( + "Unrecognized option '[%s] %s=' in config file %s" % ( + real_section, unknown, filename + ) + ) + + # [paths] is special + if cp.has_section('paths'): + for option in cp.options('paths'): + self.paths[option] = cp.getlist('paths', option) + any_set = True + + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + any_set = True + + # Was this file used as a config file? If it's specifically our file, + # then it was used. If we're piggybacking on someone else's file, + # then it was only used if we found some settings in it. + if our_file: + return True + else: + return any_set + + CONFIG_FILE_OPTIONS = [ + # These are *args for _set_attr_from_config_option: + # (attr, where, type_="") + # + # attr is the attribute to set on the CoverageConfig object. + # where is the section:name to read from the configuration file. + # type_ is the optional type to apply, by using .getTYPE to read the + # configuration value from the file. + + # [run] + ('branch', 'run:branch', 'boolean'), + ('concurrency', 'run:concurrency', 'list'), + ('cover_pylib', 'run:cover_pylib', 'boolean'), + ('data_file', 'run:data_file'), + ('debug', 'run:debug', 'list'), + ('disable_warnings', 'run:disable_warnings', 'list'), + ('note', 'run:note'), + ('parallel', 'run:parallel', 'boolean'), + ('plugins', 'run:plugins', 'list'), + ('run_include', 'run:include', 'list'), + ('run_omit', 'run:omit', 'list'), + ('source', 'run:source', 'list'), + ('timid', 'run:timid', 'boolean'), + + # [report] + ('exclude_list', 'report:exclude_lines', 'regexlist'), + ('fail_under', 'report:fail_under', 'float'), + ('ignore_errors', 'report:ignore_errors', 'boolean'), + ('partial_always_list', 'report:partial_branches_always', 'regexlist'), + ('partial_list', 'report:partial_branches', 'regexlist'), + ('precision', 'report:precision', 'int'), + ('report_include', 'report:include', 'list'), + ('report_omit', 'report:omit', 'list'), + ('show_missing', 'report:show_missing', 'boolean'), + ('skip_covered', 'report:skip_covered', 'boolean'), + ('sort', 'report:sort'), + + # [html] + ('extra_css', 'html:extra_css'), + ('html_dir', 'html:directory'), + ('html_title', 'html:title'), + + # [xml] + ('xml_output', 'xml:output'), + ('xml_package_depth', 'xml:package_depth', 'int'), + ] + + def _set_attr_from_config_option(self, cp, attr, where, type_=''): + """Set an attribute on self if it exists in the ConfigParser. + + Returns True if the attribute was set. 
+ + """ + section, option = where.split(":") + if cp.has_option(section, option): + method = getattr(cp, 'get' + type_) + setattr(self, attr, method(section, option)) + return True + return False + + def get_plugin_options(self, plugin): + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) + + def set_option(self, option_name, value): + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + setattr(self, attr, value) + return + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + self.plugin_options.setdefault(plugin_name, {})[key] = value + return + + # If we get here, we didn't find the option. + raise CoverageException("No such option: %r" % option_name) + + def get_option(self, option_name): + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + return getattr(self, attr) + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + return self.plugin_options.get(plugin_name, {}).get(key) + + # If we get here, we didn't find the option. + raise CoverageException("No such option: %r" % option_name) + + +def read_coverage_config(config_file, **kwargs): + """Read the coverage.py configuration. + + Arguments: + config_file: a boolean or string, see the `Coverage` class for the + tricky details. + all others: keyword arguments from the `Coverage` class, used for + setting values in the configuration. + + Returns: + config_file, config: + config_file is the value to use for config_file in other + invocations of coverage. + + config is a CoverageConfig object read from the appropriate + configuration file. + + """ + # Build the configuration from a number of sources: + # 1) defaults: + config = CoverageConfig() + + # 2) from a file: + if config_file: + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. 
+ if config_file == ".coveragerc": + config_file = True + specified_file = (config_file is not True) + if not specified_file: + config_file = ".coveragerc" + + for fname, our_file in [(config_file, True), + ("setup.cfg", False), + ("tox.ini", False)]: + config_read = config.from_file(fname, our_file=our_file) + is_config_file = fname == config_file + + if not config_read and is_config_file and specified_file: + raise CoverageException("Couldn't read '%s' as a config file" % fname) + + if config_read: + break + + # 3) from environment variables: + env_data_file = os.environ.get('COVERAGE_FILE') + if env_data_file: + config.data_file = env_data_file + debugs = os.environ.get('COVERAGE_DEBUG') + if debugs: + config.debug.extend(d.strip() for d in debugs.split(",")) + + # 4) from constructor arguments: + config.from_args(**kwargs) + + # Once all the config has been collected, there's a little post-processing + # to do. + config.data_file = os.path.expanduser(config.data_file) + config.html_dir = os.path.expanduser(config.html_dir) + config.xml_output = os.path.expanduser(config.xml_output) + + return config_file, config diff --git a/env_27/lib/python2.7/site-packages/coverage/control.py b/env_27/lib/python2.7/site-packages/coverage/control.py new file mode 100644 index 0000000..cf65d65 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/control.py @@ -0,0 +1,1301 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Core control stuff for coverage.py.""" + + +import atexit +import inspect +import itertools +import os +import platform +import re +import sys +import time +import traceback + +from coverage import env +from coverage.annotate import AnnotateReporter +from coverage.backward import string_class, iitems +from coverage.collector import Collector +from coverage.config import read_coverage_config +from coverage.data import CoverageData, CoverageDataFiles +from coverage.debug import DebugControl, write_formatted_info +from coverage.files import TreeMatcher, FnmatchMatcher +from coverage.files import PathAliases, find_python_files, prep_patterns +from coverage.files import canonical_filename, set_relative_directory +from coverage.files import ModuleMatcher, abs_file +from coverage.html import HtmlReporter +from coverage.misc import CoverageException, bool_or_none, join_regex +from coverage.misc import file_be_gone, isolate_module +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins +from coverage.python import PythonFileReporter, source_for_file +from coverage.results import Analysis, Numbers +from coverage.summary import SummaryReporter +from coverage.xmlreport import XmlReporter + +try: + from coverage.multiproc import patch_multiprocessing +except ImportError: # pragma: only jython + # Jython has no multiprocessing module. + patch_multiprocessing = None + +os = isolate_module(os) + +# Pypy has some unusual stuff in the "stdlib". Consider those locations +# when deciding where the stdlib is. These modules are not used for anything, +# they are modules importable from the pypy lib directories, so that we can +# find those directories. +_structseq = _pypy_irc_topic = None +if env.PYPY: + try: + import _structseq + except ImportError: + pass + + try: + import _pypy_irc_topic + except ImportError: + pass + + +class Coverage(object): + """Programmatic access to coverage.py. 
+ + To use:: + + from coverage import Coverage + + cov = Coverage() + cov.start() + #.. call your code .. + cov.stop() + cov.html_report(directory='covhtml') + + """ + def __init__( + self, data_file=None, data_suffix=None, cover_pylib=None, + auto_data=False, timid=None, branch=None, config_file=True, + source=None, omit=None, include=None, debug=None, + concurrency=None, + ): + """ + `data_file` is the base name of the data file to use, defaulting to + ".coverage". `data_suffix` is appended (with a dot) to `data_file` to + create the final file name. If `data_suffix` is simply True, then a + suffix is created with the machine and process identity included. + + `cover_pylib` is a boolean determining whether Python code installed + with the Python interpreter is measured. This includes the Python + standard library and any packages installed with the interpreter. + + If `auto_data` is true, then any existing data file will be read when + coverage measurement starts, and data will be saved automatically when + measurement stops. + + If `timid` is true, then a slower and simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions breaks the faster trace function. + + If `branch` is true, then branch coverage will be measured in addition + to the usual statement coverage. + + `config_file` determines what configuration file to read: + + * If it is ".coveragerc", it is interpreted as if it were True, + for backward compatibility. + + * If it is a string, it is the name of the file to read. If the + file can't be read, it is an error. + + * If it is True, then a few standard files names are tried + (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for + these files to not be found. + + * If it is False, then no configuration file is read. + + `source` is a list of file paths or package names. Only code located + in the trees indicated by the file paths or package names will be + measured. + + `include` and `omit` are lists of file name patterns. Files that match + `include` will be measured, files that match `omit` will not. Each + will also accept a single string argument. + + `debug` is a list of strings indicating what debugging information is + desired. + + `concurrency` is a string indicating the concurrency library being used + in the measured code. Without this, coverage.py will get incorrect + results if these libraries are in use. Valid strings are "greenlet", + "eventlet", "gevent", "multiprocessing", or "thread" (the default). + This can also be a list of these strings. + + .. versionadded:: 4.0 + The `concurrency` parameter. + + .. versionadded:: 4.2 + The `concurrency` parameter can now be a list of strings. + + """ + # Build our configuration from a number of sources. + self.config_file, self.config = read_coverage_config( + config_file=config_file, + data_file=data_file, cover_pylib=cover_pylib, timid=timid, + branch=branch, parallel=bool_or_none(data_suffix), + source=source, run_omit=omit, run_include=include, debug=debug, + report_omit=omit, report_include=include, + concurrency=concurrency, + ) + + # This is injectable by tests. + self._debug_file = None + + self._auto_load = self._auto_save = auto_data + self._data_suffix = data_suffix + + # The matchers for _should_trace. + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = self.cover_match = None + self.include_match = self.omit_match = None + + # Is it ok for no data to be collected? 
+ self._warn_no_data = True + self._warn_unimported_source = True + + # A record of all the warnings that have been issued. + self._warnings = [] + + # Other instance attributes, set later. + self.omit = self.include = self.source = None + self.source_pkgs_unmatched = None + self.source_pkgs = None + self.data = self.data_files = self.collector = None + self.plugins = None + self.pylib_paths = self.cover_paths = None + self.data_suffix = self.run_suffix = None + self._exclude_re = None + self.debug = None + + # State machine variables: + # Have we initialized everything? + self._inited = False + # Have we started collecting and not stopped it? + self._started = False + + # If we have sub-process measurement happening automatically, then we + # want any explicit creation of a Coverage object to mean, this process + # is already coverage-aware, so don't auto-measure it. By now, the + # auto-creation of a Coverage object has already happened. But we can + # find it and tell it not to save its data. + if not env.METACOV: + _prevent_sub_process_measurement() + + def _init(self): + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. + + """ + if self._inited: + return + + self._inited = True + + # Create and configure the debugging controller. COVERAGE_DEBUG_FILE + # is an environment variable, the name of a file to append debug logs + # to. + if self._debug_file is None: + debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE") + if debug_file_name: + self._debug_file = open(debug_file_name, "a") + else: + self._debug_file = sys.stderr + self.debug = DebugControl(self.config.debug, self._debug_file) + + # _exclude_re is a dict that maps exclusion list names to compiled regexes. + self._exclude_re = {} + + set_relative_directory() + + # Load plugins + self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug) + + # Run configuring plugins. + for plugin in self.plugins.configurers: + # We need an object with set_option and get_option. Either self or + # self.config will do. Choosing randomly stops people from doing + # other things with those objects, against the public API. Yes, + # this is a bit childish. :) + plugin.configure([self, self.config][int(time.time()) % 2]) + + # The source argument can be directories or package names. + self.source = [] + self.source_pkgs = [] + for src in self.config.source or []: + if os.path.isdir(src): + self.source.append(canonical_filename(src)) + else: + self.source_pkgs.append(src) + self.source_pkgs_unmatched = self.source_pkgs[:] + + self.omit = prep_patterns(self.config.run_omit) + self.include = prep_patterns(self.config.run_include) + + concurrency = self.config.concurrency or [] + if "multiprocessing" in concurrency: + if not patch_multiprocessing: + raise CoverageException( # pragma: only jython + "multiprocessing is not supported on this Python" + ) + patch_multiprocessing(rcfile=self.config_file) + # Multi-processing uses parallel for the subprocesses, so also use + # it for the main process. + self.config.parallel = True + + self.collector = Collector( + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + timid=self.config.timid, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, + ) + + # Early warning if we aren't going to be able to support plugins. 
+ if self.plugins.file_tracers and not self.collector.supports_plugins: + self._warn( + "Plugin file tracers (%s) aren't supported with %s" % ( + ", ".join( + plugin._coverage_plugin_name + for plugin in self.plugins.file_tracers + ), + self.collector.tracer_name(), + ) + ) + for plugin in self.plugins.file_tracers: + plugin._coverage_enabled = False + + # Suffixes are a bit tricky. We want to use the data suffix only when + # collecting data, not when combining data. So we save it as + # `self.run_suffix` now, and promote it to `self.data_suffix` if we + # find that we are collecting data later. + if self._data_suffix or self.config.parallel: + if not isinstance(self._data_suffix, string_class): + # if data_suffix=True, use .machinename.pid.random + self._data_suffix = True + else: + self._data_suffix = None + self.data_suffix = None + self.run_suffix = self._data_suffix + + # Create the data file. We do this at construction time so that the + # data file will be written into the directory where the process + # started rather than wherever the process eventually chdir'd to. + self.data = CoverageData(debug=self.debug) + self.data_files = CoverageDataFiles( + basename=self.config.data_file, warn=self._warn, debug=self.debug, + ) + + # The directories for files considered "installed with the interpreter". + self.pylib_paths = set() + if not self.config.cover_pylib: + # Look at where some standard modules are located. That's the + # indication for "installed with the interpreter". In some + # environments (virtualenv, for example), these modules may be + # spread across a few locations. Look at all the candidate modules + # we've imported, and take all the different ones. + for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback): + if m is not None and hasattr(m, "__file__"): + self.pylib_paths.add(self._canonical_path(m, directory=True)) + + if _structseq and not hasattr(_structseq, '__file__'): + # PyPy 2.4 has no __file__ in the builtin modules, but the code + # objects still have the file names. So dig into one to find + # the path to exclude. + structseq_new = _structseq.structseq_new + try: + structseq_file = structseq_new.func_code.co_filename + except AttributeError: + structseq_file = structseq_new.__code__.co_filename + self.pylib_paths.add(self._canonical_path(structseq_file)) + + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_paths = [self._canonical_path(__file__, directory=True)] + if env.TESTING: + # Don't include our own test code. + self.cover_paths.append(os.path.join(self.cover_paths[0], "tests")) + + # When testing, we use PyContracts, which should be considered + # part of coverage.py, and it uses six. Exclude those directories + # just as we exclude ourselves. + import contracts + import six + for mod in [contracts, six]: + self.cover_paths.append(self._canonical_path(mod)) + + # Set the reporting precision. 
+ Numbers.set_precision(self.config.precision) + + atexit.register(self._atexit) + + # Create the matchers we need for _should_trace + if self.source or self.source_pkgs: + self.source_match = TreeMatcher(self.source) + self.source_pkgs_match = ModuleMatcher(self.source_pkgs) + else: + if self.cover_paths: + self.cover_match = TreeMatcher(self.cover_paths) + if self.pylib_paths: + self.pylib_match = TreeMatcher(self.pylib_paths) + if self.include: + self.include_match = FnmatchMatcher(self.include) + if self.omit: + self.omit_match = FnmatchMatcher(self.omit) + + # The user may want to debug things, show info if desired. + self._write_startup_debug() + + def _write_startup_debug(self): + """Write out debug info at startup if needed.""" + wrote_any = False + with self.debug.without_callers(): + if self.debug.should('config'): + config_info = sorted(self.config.__dict__.items()) + write_formatted_info(self.debug, "config", config_info) + wrote_any = True + + if self.debug.should('sys'): + write_formatted_info(self.debug, "sys", self.sys_info()) + for plugin in self.plugins: + header = "sys: " + plugin._coverage_plugin_name + info = plugin.sys_info() + write_formatted_info(self.debug, header, info) + wrote_any = True + + if wrote_any: + write_formatted_info(self.debug, "end", ()) + + def _canonical_path(self, morf, directory=False): + """Return the canonical path of the module or file `morf`. + + If the module is a package, then return its directory. If it is a + module, then return its file, unless `directory` is True, in which + case return its enclosing directory. + + """ + morf_path = PythonFileReporter(morf, self).filename + if morf_path.endswith("__init__.py") or directory: + morf_path = os.path.split(morf_path)[0] + return morf_path + + def _name_for_module(self, module_globals, filename): + """Get the name of the module for a set of globals and file name. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + if module_globals is None: # pragma: only ironpython + # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296 + module_globals = {} + + dunder_name = module_globals.get('__name__', None) + + if isinstance(dunder_name, str) and dunder_name != '__main__': + # This is the usual case: an imported module. + return dunder_name + + loader = module_globals.get('__loader__', None) + for attrname in ('fullname', 'name'): # attribute renamed in py3.2 + if hasattr(loader, attrname): + fullname = getattr(loader, attrname) + else: + continue + + if isinstance(fullname, str) and fullname != '__main__': + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + def _should_trace_internal(self, filename, frame): + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. + + Returns a FileDisposition object. 
+ + """ + original_filename = filename + disp = _disposition_init(self.collector.file_disposition_class, filename) + + def nope(disp, reason): + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. Since + # .pyc files can be moved after compilation (for example, by being + # installed), we look for __file__ in the frame and prefer it to the + # co_filename value. + dunder_file = frame.f_globals and frame.f_globals.get('__file__') + if dunder_file: + filename = source_for_file(dunder_file) + if original_filename and not original_filename.startswith('<'): + orig = os.path.basename(original_filename) + if orig != os.path.basename(filename): + # Files shouldn't be renamed when moved. This happens when + # exec'ing code. If it seems like something is wrong with + # the frame's file name, then just use the original. + filename = original_filename + + if not filename: + # Empty string is pretty useless. + return nope(disp, "empty string isn't a file name") + + if filename.startswith('memory:'): + return nope(disp, "memory isn't traceable") + + if filename.startswith('<'): + # Lots of non-file execution is represented with artificial + # file names like "", "", or + # "". Don't ever trace these executions, since we + # can't do anything with the data later anyway. + return nope(disp, "not a real file name") + + # pyexpat does a dumb thing, calling the trace function explicitly from + # C code with a C file name. + if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename): + return nope(disp, "pyexpat lies about itself") + + # Jython reports the .class file to the tracer, use the source file. + if filename.endswith("$py.class"): + filename = filename[:-9] + ".py" + + canonical = canonical_filename(filename) + disp.canonical_filename = canonical + + # Try the plugins, see if they have an opinion about the file. + plugin = None + for plugin in self.plugins.file_tracers: + if not plugin._coverage_enabled: + continue + + try: + file_tracer = plugin.file_tracer(canonical) + if file_tracer is not None: + file_tracer._coverage_plugin = plugin + disp.trace = True + disp.file_tracer = file_tracer + if file_tracer.has_dynamic_source_filename(): + disp.has_dynamic_filename = True + else: + disp.source_filename = canonical_filename( + file_tracer.source_filename() + ) + break + except Exception: + self._warn( + "Disabling plug-in %r due to an exception:" % ( + plugin._coverage_plugin_name + ) + ) + traceback.print_exc() + plugin._coverage_enabled = False + continue + else: + # No plugin wanted it: it's Python. + disp.trace = True + disp.source_filename = canonical + + if not disp.has_dynamic_filename: + if not disp.source_filename: + raise CoverageException( + "Plugin %r didn't set source_filename for %r" % + (plugin, disp.original_filename) + ) + reason = self._check_include_omit_etc_internal( + disp.source_filename, frame, + ) + if reason: + nope(disp, reason) + + return disp + + def _check_include_omit_etc_internal(self, filename, frame): + """Check a file name against the include, omit, etc, rules. + + Returns a string or None. String means, don't trace, and is the reason + why. None means no reason found to not trace. 
+ + """ + modulename = self._name_for_module(frame.f_globals, filename) + + # If the user specified source or include, then that's authoritative + # about the outer bound of what to measure and we don't have to apply + # any canned exclusions. If they didn't, then we have to exclude the + # stdlib and coverage.py directories. + if self.source_match: + if self.source_pkgs_match.match(modulename): + if modulename in self.source_pkgs_unmatched: + self.source_pkgs_unmatched.remove(modulename) + elif not self.source_match.match(filename): + return "falls outside the --source trees" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match and self.cover_match.match(filename): + return "is part of coverage.py" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No reason found to skip this file. + return None + + def _should_trace(self, filename, frame): + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + disp = self._should_trace_internal(filename, frame) + if self.debug.should('trace'): + self.debug.write(_disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename, frame): + """Check a file name against the include/omit/etc, rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. + + """ + reason = self._check_include_omit_etc_internal(filename, frame) + if self.debug.should('trace'): + if not reason: + msg = "Including %r" % (filename,) + else: + msg = "Not including %r: %s" % (filename, reason) + self.debug.write(msg) + + return not reason + + def _warn(self, msg, slug=None): + """Use `msg` as a warning. + + For warning suppression, use `slug` as the shorthand. + """ + if slug in self.config.disable_warnings: + # Don't issue the warning + return + + self._warnings.append(msg) + if slug: + msg = "%s (%s)" % (msg, slug) + if self.debug.should('pid'): + msg = "[%d] %s" % (os.getpid(), msg) + sys.stderr.write("Coverage.py warning: %s\n" % msg) + + def get_option(self, option_name): + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name, value): + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be an + appropriate Python value. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling:: + + cov.set_option("run:branch", True) + + has the same effect as this configuration file:: + + [run] + branch = True + + .. 
versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def use_cache(self, usecache): + """Obsolete method.""" + self._init() + if not usecache: + self._warn("use_cache(False) is no longer supported.") + + def load(self): + """Load previously-collected coverage data from the data file.""" + self._init() + self.collector.reset() + self.data_files.read(self.data) + + def start(self): + """Start measuring code coverage. + + Coverage measurement only occurs in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + """ + self._init() + if self.include: + if self.source or self.source_pkgs: + self._warn("--include is ignored because --source is set", slug="include-ignored") + if self.run_suffix: + # Calling start() means we're running code, so use the run_suffix + # as the data_suffix when we eventually save the data. + self.data_suffix = self.run_suffix + if self._auto_load: + self.load() + + self.collector.start() + self._started = True + + def stop(self): + """Stop measuring code coverage.""" + if self._started: + self.collector.stop() + self._started = False + + def _atexit(self): + """Clean up on process shutdown.""" + if self.debug.should("process"): + self.debug.write("atexit: {0!r}".format(self)) + if self._started: + self.stop() + if self._auto_save: + self.save() + + def erase(self): + """Erase previously-collected coverage data. + + This removes the in-memory data collected in this session as well as + discarding the data file. + + """ + self._init() + self.collector.reset() + self.data.erase() + self.data_files.erase(parallel=self.config.parallel) + + def clear_exclude(self, which='exclude'): + """Clear the exclude list.""" + self._init() + setattr(self.config, which + "_list", []) + self._exclude_regex_stale() + + def exclude(self, regex, which='exclude'): + """Exclude source lines from execution consideration. + + A number of lists of regular expressions are maintained. Each list + selects lines that are treated differently during reporting. + + `which` determines which list is modified. The "exclude" list selects + lines that are not considered executable at all. The "partial" list + indicates lines with branches that are not taken. + + `regex` is a regular expression. The regex is added to the specified + list. If any of the regexes in the list is found in a line, the line + is marked for special treatment during reporting. + + """ + self._init() + excl_list = getattr(self.config, which + "_list") + excl_list.append(regex) + self._exclude_regex_stale() + + def _exclude_regex_stale(self): + """Drop all the compiled exclusion regexes, a list was modified.""" + self._exclude_re.clear() + + def _exclude_regex(self, which): + """Return a compiled regex for the given exclusion list.""" + if which not in self._exclude_re: + excl_list = getattr(self.config, which + "_list") + self._exclude_re[which] = join_regex(excl_list) + return self._exclude_re[which] + + def get_exclude_list(self, which='exclude'): + """Return a list of excluded regex patterns. + + `which` indicates which list is desired. See :meth:`exclude` for the + lists that are available, and their meaning. 
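+
+        A short usage sketch (the added regex is illustrative)::
+
+            cov = Coverage()
+            cov.exclude(r"if DEBUG:")
+            cov.get_exclude_list()   # DEFAULT_EXCLUDE plus r"if DEBUG:"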
+ + """ + self._init() + return getattr(self.config, which + "_list") + + def save(self): + """Save the collected coverage data to the data file.""" + self._init() + self.get_data() + self.data_files.write(self.data, suffix=self.data_suffix) + + def combine(self, data_paths=None, strict=False): + """Combine together a number of similarly-named coverage data files. + + All coverage data files whose name starts with `data_file` (from the + coverage() constructor) will be read, and combined together into the + current measurements. + + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + If `strict` is true, then it is an error to attempt to combine when + there are no data files to combine. + + .. versionadded:: 4.0 + The `data_paths` parameter. + + .. versionadded:: 4.3 + The `strict` parameter. + + """ + self._init() + self.get_data() + + aliases = None + if self.config.paths: + aliases = PathAliases() + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + + self.data_files.combine_parallel_data( + self.data, aliases=aliases, data_paths=data_paths, strict=strict, + ) + + def get_data(self): + """Get the collected data. + + Also warn about various problems collecting data. + + Returns a :class:`coverage.CoverageData`, the collected coverage data. + + .. versionadded:: 4.0 + + """ + self._init() + + if self.collector.save_data(self.data): + self._post_save_work() + + return self.data + + def _post_save_work(self): + """After saving data, look for warnings, post-work, etc. + + Warn about things that should have happened but didn't. + Look for unexecuted files. + + """ + # If there are still entries in the source_pkgs_unmatched list, + # then we never encountered those packages. + if self._warn_unimported_source: + for pkg in self.source_pkgs_unmatched: + self._warn_about_unmeasured_code(pkg) + + # Find out if we got any data. + if not self.data and self._warn_no_data: + self._warn("No data was collected.", slug="no-data-collected") + + # Find files that were never executed at all. + for pkg in self.source_pkgs: + if (not pkg in sys.modules or + not module_has_file(sys.modules[pkg])): + continue + pkg_file = source_for_file(sys.modules[pkg].__file__) + self._find_unexecuted_files(self._canonical_path(pkg_file)) + + for src in self.source: + self._find_unexecuted_files(src) + + if self.config.note: + self.data.add_run_info(note=self.config.note) + + def _warn_about_unmeasured_code(self, pkg): + """Warn about a package or module that we never traced. + + `pkg` is a string, the name of the package or module. + + """ + mod = sys.modules.get(pkg) + if mod is None: + self._warn("Module %s was never imported." % pkg, slug="module-not-imported") + return + + if module_is_namespace(mod): + # A namespace package. It's OK for this not to have been traced, + # since there is no code directly in it. + return + + if not module_has_file(mod): + self._warn("Module %s has no Python source." % pkg, slug="module-not-python") + return + + # The module was in sys.modules, and seems like a module with code, but + # we never measured it. I guess that means it was imported before + # coverage even started. 
+ self._warn( + "Module %s was previously imported, but not measured" % pkg, + slug="module-not-measured", + ) + + def _find_plugin_files(self, src_dir): + """Get executable files from the plugins.""" + for plugin in self.plugins.file_tracers: + for x_file in plugin.find_executable_files(src_dir): + yield x_file, plugin._coverage_plugin_name + + def _find_unexecuted_files(self, src_dir): + """Find unexecuted files in `src_dir`. + + Search for files in `src_dir` that are probably importable, + and add them as unexecuted files in `self.data`. + + """ + py_files = ((py_file, None) for py_file in find_python_files(src_dir)) + plugin_files = self._find_plugin_files(src_dir) + + for file_path, plugin_name in itertools.chain(py_files, plugin_files): + file_path = canonical_filename(file_path) + if self.omit_match and self.omit_match.match(file_path): + # Turns out this file was omitted, so don't pull it back + # in as unexecuted. + continue + self.data.touch_file(file_path, plugin_name) + + # Backward compatibility with version 1. + def analysis(self, morf): + """Like `analysis2` but doesn't return excluded line numbers.""" + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2(self, morf): + """Analyze a module. + + `morf` is a module or a file name. It will be analyzed to determine + its coverage statistics. The return value is a 5-tuple: + + * The file name for the module. + * A list of line numbers of executable statements. + * A list of line numbers of excluded statements. + * A list of line numbers of statements not run (missing from + execution). + * A readable formatted string of the missing line numbers. + + The analysis uses the source file itself and the current measured + coverage data. + + """ + self._init() + analysis = self._analyze(morf) + return ( + analysis.filename, + sorted(analysis.statements), + sorted(analysis.excluded), + sorted(analysis.missing), + analysis.missing_formatted(), + ) + + def _analyze(self, it): + """Analyze a single morf or code unit. + + Returns an `Analysis` object. + + """ + self.get_data() + if not isinstance(it, FileReporter): + it = self._get_file_reporter(it) + + return Analysis(self.data, it) + + def _get_file_reporter(self, morf): + """Get a FileReporter for a module or file name.""" + plugin = None + file_reporter = "python" + + if isinstance(morf, string_class): + abs_morf = abs_file(morf) + plugin_name = self.data.file_tracer(abs_morf) + if plugin_name: + plugin = self.plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(abs_morf) + if file_reporter is None: + raise CoverageException( + "Plugin %r did not provide a file reporter for %r." % ( + plugin._coverage_plugin_name, morf + ) + ) + + if file_reporter == "python": + file_reporter = PythonFileReporter(morf, self) + + return file_reporter + + def _get_file_reporters(self, morfs=None): + """Get a list of FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + the list of FileReporters. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. + + """ + if not morfs: + morfs = self.data.measured_files() + + # Be sure we have a list. 
+ if not isinstance(morfs, (list, tuple)): + morfs = [morfs] + + file_reporters = [] + for morf in morfs: + file_reporter = self._get_file_reporter(morf) + file_reporters.append(file_reporter) + + return file_reporters + + def report( + self, morfs=None, show_missing=None, ignore_errors=None, + file=None, # pylint: disable=redefined-builtin + omit=None, include=None, skip_covered=None, + ): + """Write a summary report to `file`. + + Each module in `morfs` is listed, with counts of statements, executed + statements, missing statements, and a list of lines missed. + + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. + + If `skip_covered` is True, don't report on files with 100% coverage. + + Returns a float, the total percentage covered. + + """ + self.get_data() + self.config.from_args( + ignore_errors=ignore_errors, report_omit=omit, report_include=include, + show_missing=show_missing, skip_covered=skip_covered, + ) + reporter = SummaryReporter(self, self.config) + return reporter.report(morfs, outfile=file) + + def annotate( + self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None, + ): + """Annotate a list of modules. + + Each module in `morfs` is annotated. The source is written to a new + file, named with a ",cover" suffix, with each line prefixed with a + marker to indicate the coverage of the line. Covered lines have ">", + excluded lines have "-", and missing lines have "!". + + See :meth:`report` for other arguments. + + """ + self.get_data() + self.config.from_args( + ignore_errors=ignore_errors, report_omit=omit, report_include=include + ) + reporter = AnnotateReporter(self, self.config) + reporter.report(morfs, directory=directory) + + def html_report(self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None, extra_css=None, title=None, + skip_covered=None): + """Generate an HTML report. + + The HTML is written to `directory`. The file "index.html" is the + overview starting point, with links to more detailed pages for + individual modules. + + `extra_css` is a path to a file of other CSS to apply on the page. + It will be copied into the HTML directory. + + `title` is a text string (not HTML) to use as the title of the HTML + report. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + """ + self.get_data() + self.config.from_args( + ignore_errors=ignore_errors, report_omit=omit, report_include=include, + html_dir=directory, extra_css=extra_css, html_title=title, + skip_covered=skip_covered, + ) + reporter = HtmlReporter(self, self.config) + return reporter.report(morfs) + + def xml_report( + self, morfs=None, outfile=None, ignore_errors=None, + omit=None, include=None, + ): + """Generate an XML report of coverage results. + + The report is compatible with Cobertura reports. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. 
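+
+        A usage sketch (the output path is illustrative)::
+
+            cov = Coverage()
+            cov.start()
+            # ... run the code to measure ...
+            cov.stop()
+            cov.xml_report(outfile="build/coverage.xml")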
+ + """ + self.get_data() + self.config.from_args( + ignore_errors=ignore_errors, report_omit=omit, report_include=include, + xml_output=outfile, + ) + file_to_close = None + delete_file = False + if self.config.xml_output: + if self.config.xml_output == '-': + outfile = sys.stdout + else: + # Ensure that the output directory is created; done here + # because this report pre-opens the output file. + # HTMLReport does this using the Report plumbing because + # its task is more complex, being multiple files. + output_dir = os.path.dirname(self.config.xml_output) + if output_dir and not os.path.isdir(output_dir): + os.makedirs(output_dir) + open_kwargs = {} + if env.PY3: + open_kwargs['encoding'] = 'utf8' + outfile = open(self.config.xml_output, "w", **open_kwargs) + file_to_close = outfile + try: + reporter = XmlReporter(self, self.config) + return reporter.report(morfs, outfile=outfile) + except CoverageException: + delete_file = True + raise + finally: + if file_to_close: + file_to_close.close() + if delete_file: + file_be_gone(self.config.xml_output) + + def sys_info(self): + """Return a list of (key, value) pairs showing internal information.""" + + import coverage as covmod + + self._init() + + def plugin_info(plugins): + """Make an entry for the sys_info from a list of plug-ins.""" + entries = [] + for plugin in plugins: + entry = plugin._coverage_plugin_name + if not plugin._coverage_enabled: + entry += " (disabled)" + entries.append(entry) + return entries + + info = [ + ('version', covmod.__version__), + ('coverage', covmod.__file__), + ('cover_paths', self.cover_paths), + ('pylib_paths', self.pylib_paths), + ('tracer', self.collector.tracer_name()), + ('plugins.file_tracers', plugin_info(self.plugins.file_tracers)), + ('plugins.configurers', plugin_info(self.plugins.configurers)), + ('config_files', self.config.attempted_config_files), + ('configs_read', self.config.config_files), + ('data_path', self.data_files.filename), + ('python', sys.version.replace('\n', '')), + ('platform', platform.platform()), + ('implementation', platform.python_implementation()), + ('executable', sys.executable), + ('cwd', os.getcwd()), + ('path', sys.path), + ('environment', sorted( + ("%s = %s" % (k, v)) + for k, v in iitems(os.environ) + if k.startswith(("COV", "PY")) + )), + ('command_line', " ".join(getattr(sys, 'argv', ['???']))), + ] + + matcher_names = [ + 'source_match', 'source_pkgs_match', + 'include_match', 'omit_match', + 'cover_match', 'pylib_match', + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = '-none-' + info.append((matcher_name, matcher_info)) + + return info + + +def module_is_namespace(mod): + """Is the module object `mod` a PEP420 namespace module?""" + return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None + + +def module_has_file(mod): + """Does the module object `mod` have an existing __file__ ?""" + mod__file__ = getattr(mod, '__file__', None) + if mod__file__ is None: + return False + return os.path.exists(mod__file__) + + +# FileDisposition "methods": FileDisposition is a pure value object, so it can +# be implemented in either C or Python. Acting on them is done with these +# functions. 
+ +def _disposition_init(cls, original_filename): + """Construct and initialize a new FileDisposition object.""" + disp = cls() + disp.original_filename = original_filename + disp.canonical_filename = original_filename + disp.source_filename = None + disp.trace = False + disp.reason = "" + disp.file_tracer = None + disp.has_dynamic_filename = False + return disp + + +def _disposition_debug_msg(disp): + """Make a nice debug message of what the FileDisposition is doing.""" + if disp.trace: + msg = "Tracing %r" % (disp.original_filename,) + if disp.file_tracer: + msg += ": will be traced by %r" % disp.file_tracer + else: + msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason) + return msg + + +def process_startup(): + """Call this at Python start-up to perhaps measure coverage. + + If the environment variable COVERAGE_PROCESS_START is defined, coverage + measurement is started. The value of the variable is the config file + to use. + + There are two ways to configure your Python installation to invoke this + function when Python starts: + + #. Create or append to sitecustomize.py to add these lines:: + + import coverage + coverage.process_startup() + + #. Create a .pth file in your Python installation containing:: + + import coverage; coverage.process_startup() + + Returns the :class:`Coverage` instance that was started, or None if it was + not started by this call. + + """ + cps = os.environ.get("COVERAGE_PROCESS_START") + if not cps: + # No request for coverage, nothing to do. + return None + + # This function can be called more than once in a process. This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more + # details. + + if hasattr(process_startup, "coverage"): + # We've annotated this function before, so we must have already + # started coverage.py in this process. Nothing to do. 
+ return None + + cov = Coverage(config_file=cps) + process_startup.coverage = cov + cov.start() + cov._warn_no_data = False + cov._warn_unimported_source = False + cov._auto_save = True + + return cov + + +def _prevent_sub_process_measurement(): + """Stop any subprocess auto-measurement from writing data.""" + auto_created_coverage = getattr(process_startup, "coverage", None) + if auto_created_coverage is not None: + auto_created_coverage._auto_save = False diff --git a/env_27/lib/python2.7/site-packages/coverage/data.py b/env_27/lib/python2.7/site-packages/coverage/data.py new file mode 100644 index 0000000..6f76a72 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/data.py @@ -0,0 +1,794 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Coverage data for coverage.py.""" + +import glob +import itertools +import json +import optparse +import os +import os.path +import random +import re +import socket + +from coverage import env +from coverage.backward import iitems, string_class +from coverage.debug import _TEST_NAME_FILE +from coverage.files import PathAliases +from coverage.misc import CoverageException, file_be_gone, isolate_module + +os = isolate_module(os) + + +class CoverageData(object): + """Manages collected coverage data, including file storage. + + This class is the public supported API to the data coverage.py collects + during program execution. It includes information about what code was + executed. It does not include information from the analysis phase, to + determine what lines could have been executed, or what lines were not + executed. + + .. note:: + + The file format is not documented or guaranteed. It will change in + the future, in possibly complicated ways. Do not read coverage.py + data files directly. Use this API to avoid disruption. + + There are a number of kinds of data that can be collected: + + * **lines**: the line numbers of source lines that were executed. + These are always available. + + * **arcs**: pairs of source and destination line numbers for transitions + between source lines. These are only available if branch coverage was + used. + + * **file tracer names**: the module names of the file tracer plugins that + handled each file in the data. + + * **run information**: information about the program execution. This is + written during "coverage run", and then accumulated during "coverage + combine". + + Lines, arcs, and file tracer names are stored for each source file. File + names in this API are case-sensitive, even on platforms with + case-insensitive file systems. + + To read a coverage.py data file, use :meth:`read_file`, or + :meth:`read_fileobj` if you have an already-opened file. You can then + access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, + or :meth:`file_tracer`. Run information is available with + :meth:`run_infos`. + + The :meth:`has_arcs` method indicates whether arc data is available. You + can get a list of the files in the data with :meth:`measured_files`. + A summary of the line data is available from :meth:`line_counts`. As with + most Python containers, you can determine if there is any data at all by + using this object as a boolean value. + + + Most data files will be created by coverage.py itself, but you can use + methods here to create data files if you like. 
The :meth:`add_lines`, + :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways + that are convenient for coverage.py. The :meth:`add_run_info` method adds + key-value pairs to the run information. + + To add a file without any measured data, use :meth:`touch_file`. + + You write to a named file with :meth:`write_file`, or to an already opened + file with :meth:`write_fileobj`. + + You can clear the data in memory with :meth:`erase`. Two data collections + can be combined by using :meth:`update` on one :class:`CoverageData`, + passing it the other. + + """ + + # The data file format is JSON, with these keys: + # + # * lines: a dict mapping file names to lists of line numbers + # executed:: + # + # { "file1": [17,23,45], "file2": [1,2,3], ... } + # + # * arcs: a dict mapping file names to lists of line number pairs:: + # + # { "file1": [[17,23], [17,25], [25,26]], ... } + # + # * file_tracers: a dict mapping file names to plugin names:: + # + # { "file1": "django.coverage", ... } + # + # * runs: a list of dicts of information about the coverage.py runs + # contributing to the data:: + # + # [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ] + # + # Only one of `lines` or `arcs` will be present: with branch coverage, data + # is stored as arcs. Without branch coverage, it is stored as lines. The + # line data is easily recovered from the arcs: it is all the first elements + # of the pairs that are greater than zero. + + def __init__(self, debug=None): + """Create a CoverageData. + + `debug` is a `DebugControl` object for writing debug messages. + + """ + self._debug = debug + + # A map from canonical Python source file name to a dictionary in + # which there's an entry for each line number that has been + # executed: + # + # { 'filename1.py': [12, 47, 1001], ... } + # + self._lines = None + + # A map from canonical Python source file name to a dictionary with an + # entry for each pair of line numbers forming an arc: + # + # { 'filename1.py': [(12,14), (47,48), ... ], ... } + # + self._arcs = None + + # A map from canonical source file name to a plugin module name: + # + # { 'filename1.py': 'django.coverage', ... } + # + self._file_tracers = {} + + # A list of dicts of information about the coverage.py runs. + self._runs = [] + + def __repr__(self): + return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format( + klass=self.__class__.__name__, + lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)), + arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)), + tracers="{{{0}}}".format(len(self._file_tracers)), + runs="[{0}]".format(len(self._runs)), + ) + + ## + ## Reading data + ## + + def has_arcs(self): + """Does this data have arcs? + + Arc data is only available if branch coverage was used during + collection. + + Returns a boolean. + + """ + return self._has_arcs() + + def lines(self, filename): + """Get the list of lines executed for a file. + + If the file was not measured, returns None. A file might be measured, + and have no lines executed, in which case an empty list is returned. + + If the file was executed, returns a list of integers, the line numbers + executed in the file. The list is in no particular order. 
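+
+        A short illustrative sketch (an editorial addition, not upstream
+        documentation; only methods defined in this class are used)::
+
+            data = CoverageData()
+            data.add_lines({"a.py": {1: None, 2: None}})
+            sorted(data.lines("a.py"))      # -> [1, 2]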
+ + """ + if self._arcs is not None: + arcs = self._arcs.get(filename) + if arcs is not None: + all_lines = itertools.chain.from_iterable(arcs) + return list(set(l for l in all_lines if l > 0)) + elif self._lines is not None: + return self._lines.get(filename) + return None + + def arcs(self, filename): + """Get the list of arcs executed for a file. + + If the file was not measured, returns None. A file might be measured, + and have no arcs executed, in which case an empty list is returned. + + If the file was executed, returns a list of 2-tuples of integers. Each + pair is a starting line number and an ending line number for a + transition from one line to another. The list is in no particular + order. + + Negative numbers have special meaning. If the starting line number is + -N, it represents an entry to the code object that starts at line N. + If the ending line number is -N, it's an exit from the code object that + starts at line N. + + """ + if self._arcs is not None: + if filename in self._arcs: + return self._arcs[filename] + return None + + def file_tracer(self, filename): + """Get the plugin name of the file tracer for a file. + + Returns the name of the plugin that handles this file. If the file was + measured, but didn't use a plugin, then "" is returned. If the file + was not measured, then None is returned. + + """ + # Because the vast majority of files involve no plugin, we don't store + # them explicitly in self._file_tracers. Check the measured data + # instead to see if it was a known file with no plugin. + if filename in (self._arcs or self._lines or {}): + return self._file_tracers.get(filename, "") + return None + + def run_infos(self): + """Return the list of dicts of run information. + + For data collected during a single run, this will be a one-element + list. If data has been combined, there will be one element for each + original data file. + + """ + return self._runs + + def measured_files(self): + """A list of all files that had been measured.""" + return list(self._arcs or self._lines or {}) + + def line_counts(self, fullpath=False): + """Return a dict summarizing the line coverage data. + + Keys are based on the file names, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + Returns a dict mapping file names to counts of lines. + + """ + summ = {} + if fullpath: + filename_fn = lambda f: f + else: + filename_fn = os.path.basename + for filename in self.measured_files(): + summ[filename_fn(filename)] = len(self.lines(filename)) + return summ + + def __nonzero__(self): + return bool(self._lines or self._arcs) + + __bool__ = __nonzero__ + + def read_fileobj(self, file_obj): + """Read the coverage data from the given file object. + + Should only be used on an empty CoverageData object.
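+
+        A minimal usage sketch (editorial; assumes ``data.dat`` was written
+        earlier by :meth:`write_fileobj`)::
+
+            data = CoverageData()
+            with open("data.dat", "r") as f:
+                data.read_fileobj(f)
+            data.line_counts()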
+ + """ + data = self._read_raw_data(file_obj) + + self._lines = self._arcs = None + + if 'lines' in data: + self._lines = data['lines'] + if 'arcs' in data: + self._arcs = dict( + (fname, [tuple(pair) for pair in arcs]) + for fname, arcs in iitems(data['arcs']) + ) + self._file_tracers = data.get('file_tracers', {}) + self._runs = data.get('runs', []) + + self._validate() + + def read_file(self, filename): + """Read the coverage data from `filename` into this object.""" + if self._debug and self._debug.should('dataio'): + self._debug.write("Reading data from %r" % (filename,)) + try: + with self._open_for_reading(filename) as f: + self.read_fileobj(f) + except Exception as exc: + raise CoverageException( + "Couldn't read data from '%s': %s: %s" % ( + filename, exc.__class__.__name__, exc, + ) + ) + + _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!" + + @classmethod + def _open_for_reading(cls, filename): + """Open a file appropriately for reading data.""" + return open(filename, "r") + + @classmethod + def _read_raw_data(cls, file_obj): + """Read the raw data from a file object.""" + go_away = file_obj.read(len(cls._GO_AWAY)) + if go_away != cls._GO_AWAY: + raise CoverageException("Doesn't seem to be a coverage.py data file") + return json.load(file_obj) + + @classmethod + def _read_raw_data_file(cls, filename): + """Read the raw data from a file, for debugging.""" + with cls._open_for_reading(filename) as f: + return cls._read_raw_data(f) + + ## + ## Writing data + ## + + def add_lines(self, line_data): + """Add measured line data. + + `line_data` is a dictionary mapping file names to dictionaries:: + + { filename: { lineno: None, ... }, ...} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding lines: %d files, %d lines total" % ( + len(line_data), sum(len(lines) for lines in line_data.values()) + )) + if self._has_arcs(): + raise CoverageException("Can't add lines to existing arc data") + + if self._lines is None: + self._lines = {} + for filename, linenos in iitems(line_data): + if filename in self._lines: + new_linenos = set(self._lines[filename]) + new_linenos.update(linenos) + linenos = new_linenos + self._lines[filename] = list(linenos) + + self._validate() + + def add_arcs(self, arc_data): + """Add measured arc data. + + `arc_data` is a dictionary mapping file names to dictionaries:: + + { filename: { (l1,l2): None, ... }, ...} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding arcs: %d files, %d arcs total" % ( + len(arc_data), sum(len(arcs) for arcs in arc_data.values()) + )) + if self._has_lines(): + raise CoverageException("Can't add arcs to existing line data") + + if self._arcs is None: + self._arcs = {} + for filename, arcs in iitems(arc_data): + if filename in self._arcs: + new_arcs = set(self._arcs[filename]) + new_arcs.update(arcs) + arcs = new_arcs + self._arcs[filename] = list(arcs) + + self._validate() + + def add_file_tracers(self, file_tracers): + """Add per-file plugin information. + + `file_tracers` is { filename: plugin_name, ... 
} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) + + existing_files = self._arcs or self._lines or {} + for filename, plugin_name in iitems(file_tracers): + if filename not in existing_files: + raise CoverageException( + "Can't add file tracer data for unmeasured file '%s'" % (filename,) + ) + existing_plugin = self._file_tracers.get(filename) + if existing_plugin is not None and plugin_name != existing_plugin: + raise CoverageException( + "Conflicting file tracer name for '%s': %r vs %r" % ( + filename, existing_plugin, plugin_name, + ) + ) + self._file_tracers[filename] = plugin_name + + self._validate() + + def add_run_info(self, **kwargs): + """Add information about the run. + + Keywords are arbitrary, and are stored in the run dictionary. Values + must be JSON serializable. You may use this function more than once, + but repeated keywords overwrite each other. + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding run info: %r" % (kwargs,)) + if not self._runs: + self._runs = [{}] + self._runs[0].update(kwargs) + self._validate() + + def touch_file(self, filename, plugin_name=""): + """Ensure that `filename` appears in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for this file. It is used + to associate the right filereporter, etc. + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Touching %r" % (filename,)) + if not self._has_arcs() and not self._has_lines(): + raise CoverageException("Can't touch files in an empty CoverageData") + + if self._has_arcs(): + where = self._arcs + else: + where = self._lines + where.setdefault(filename, []) + if plugin_name: + # Set the tracer for this file + self._file_tracers[filename] = plugin_name + + self._validate() + + def write_fileobj(self, file_obj): + """Write the coverage data to `file_obj`.""" + + # Create the file data. + file_data = {} + + if self._has_arcs(): + file_data['arcs'] = self._arcs + + if self._has_lines(): + file_data['lines'] = self._lines + + if self._file_tracers: + file_data['file_tracers'] = self._file_tracers + + if self._runs: + file_data['runs'] = self._runs + + # Write the data to the file. + file_obj.write(self._GO_AWAY) + json.dump(file_data, file_obj, separators=(',', ':')) + + def write_file(self, filename): + """Write the coverage data to `filename`.""" + if self._debug and self._debug.should('dataio'): + self._debug.write("Writing data to %r" % (filename,)) + with open(filename, 'w') as fdata: + self.write_fileobj(fdata) + + def erase(self): + """Erase the data in this object.""" + self._lines = None + self._arcs = None + self._file_tracers = {} + self._runs = [] + self._validate() + + def update(self, other_data, aliases=None): + """Update this data with data from another `CoverageData`. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + """ + if self._has_lines() and other_data._has_arcs(): + raise CoverageException("Can't combine arc data with line data") + if self._has_arcs() and other_data._has_lines(): + raise CoverageException("Can't combine line data with arc data") + + aliases = aliases or PathAliases() + + # _file_tracers: only have a string, so they have to agree. + # Have to do these first, so that our examination of self._arcs and + # self._lines won't be confused by data updated from other_data.
+ for filename in other_data.measured_files(): + other_plugin = other_data.file_tracer(filename) + filename = aliases.map(filename) + this_plugin = self.file_tracer(filename) + if this_plugin is None: + if other_plugin: + self._file_tracers[filename] = other_plugin + elif this_plugin != other_plugin: + raise CoverageException( + "Conflicting file tracer name for '%s': %r vs %r" % ( + filename, this_plugin, other_plugin, + ) + ) + + # _runs: add the new runs to these runs. + self._runs.extend(other_data._runs) + + # _lines: merge dicts. + if other_data._has_lines(): + if self._lines is None: + self._lines = {} + for filename, file_lines in iitems(other_data._lines): + filename = aliases.map(filename) + if filename in self._lines: + lines = set(self._lines[filename]) + lines.update(file_lines) + file_lines = list(lines) + self._lines[filename] = file_lines + + # _arcs: merge dicts. + if other_data._has_arcs(): + if self._arcs is None: + self._arcs = {} + for filename, file_arcs in iitems(other_data._arcs): + filename = aliases.map(filename) + if filename in self._arcs: + arcs = set(self._arcs[filename]) + arcs.update(file_arcs) + file_arcs = list(arcs) + self._arcs[filename] = file_arcs + + self._validate() + + ## + ## Miscellaneous + ## + + def _validate(self): + """If we are in paranoid mode, validate that everything is right.""" + if env.TESTING: + self._validate_invariants() + + def _validate_invariants(self): + """Validate internal invariants.""" + # Only one of _lines or _arcs should exist. + assert not(self._has_lines() and self._has_arcs()), ( + "Shouldn't have both _lines and _arcs" + ) + + # _lines should be a dict of lists of ints. + if self._has_lines(): + for fname, lines in iitems(self._lines): + assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,) + assert all(isinstance(x, int) for x in lines), ( + "_lines[%r] shouldn't be %r" % (fname, lines) + ) + + # _arcs should be a dict of lists of pairs of ints. + if self._has_arcs(): + for fname, arcs in iitems(self._arcs): + assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,) + assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), ( + "_arcs[%r] shouldn't be %r" % (fname, arcs) + ) + + # _file_tracers should have only non-empty strings as values. + for fname, plugin in iitems(self._file_tracers): + assert isinstance(fname, string_class), ( + "Key in _file_tracers shouldn't be %r" % (fname,) + ) + assert plugin and isinstance(plugin, string_class), ( + "_file_tracers[%r] shouldn't be %r" % (fname, plugin) + ) + + # _runs should be a list of dicts. + for val in self._runs: + assert isinstance(val, dict) + for key in val: + assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,) + + def add_to_hash(self, filename, hasher): + """Contribute `filename`'s data to the `hasher`. + + `hasher` is a `coverage.misc.Hasher` instance to be updated with + the file's data. It should only get the results data, not the run + data.
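+
+        An illustrative sketch (editorial; ``Hasher`` is the helper from
+        coverage.misc that the HTML reporter also uses)::
+
+            from coverage.misc import Hasher
+
+            hasher = Hasher()
+            data.add_to_hash("a.py", hasher)
+            fingerprint = hasher.hexdigest()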
+ + """ + if self._has_arcs(): + hasher.update(sorted(self.arcs(filename) or [])) + else: + hasher.update(sorted(self.lines(filename) or [])) + hasher.update(self.file_tracer(filename)) + + ## + ## Internal + ## + + def _has_lines(self): + """Do we have data in self._lines?""" + return self._lines is not None + + def _has_arcs(self): + """Do we have data in self._arcs?""" + return self._arcs is not None + + +class CoverageDataFiles(object): + """Manage the use of coverage data files.""" + + def __init__(self, basename=None, warn=None, debug=None): + """Create a CoverageDataFiles to manage data files. + + `warn` is the warning function to use. + + `basename` is the name of the file to use for storing data. + + `debug` is a `DebugControl` object for writing debug messages. + + """ + self.warn = warn + self.debug = debug + + # Construct the file name that will be used for data storage. + self.filename = os.path.abspath(basename or ".coverage") + + def erase(self, parallel=False): + """Erase the data from the file storage. + + If `parallel` is true, then also deletes data files created from the + basename by parallel-mode. + + """ + if self.debug and self.debug.should('dataio'): + self.debug.write("Erasing data file %r" % (self.filename,)) + file_be_gone(self.filename) + if parallel: + data_dir, local = os.path.split(self.filename) + localdot = local + '.*' + pattern = os.path.join(os.path.abspath(data_dir), localdot) + for filename in glob.glob(pattern): + if self.debug and self.debug.should('dataio'): + self.debug.write("Erasing parallel data file %r" % (filename,)) + file_be_gone(filename) + + def read(self, data): + """Read the coverage data.""" + if os.path.exists(self.filename): + data.read_file(self.filename) + + def write(self, data, suffix=None): + """Write the collected coverage data to a file. + + `suffix` is a suffix to append to the base file name. This can be used + for multiple or parallel execution, so that many coverage data files + can exist simultaneously. A dot will be used to join the base name and + the suffix. + + """ + filename = self.filename + if suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. + extra = "" + if _TEST_NAME_FILE: # pragma: debugging + with open(_TEST_NAME_FILE) as f: + test_name = f.read() + extra = "." + test_name + dice = random.Random(os.urandom(8)).randint(0, 999999) + suffix = "%s%s.%s.%06d" % (socket.gethostname(), extra, os.getpid(), dice) + + if suffix: + filename += "." + suffix + data.write_file(filename) + + def combine_parallel_data(self, data, aliases=None, data_paths=None, strict=False): + """Combine a number of data files together. + + Treat `self.filename` as a file prefix, and combine the data from all + of the data files starting with that prefix plus a dot. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + If `data_paths` is provided, it is a list of directories or files to + combine. Directories are searched for files that start with + `self.filename` plus dot as a prefix, and those files are combined. + + If `data_paths` is not provided, then the directory portion of + `self.filename` is used as the directory to search for data files. + + Every data file found and combined is then deleted from disk. 
If a file + cannot be read, a warning will be issued, and the file will not be + deleted. + + If `strict` is true, and no files are found to combine, an error is + raised. + + """ + # Because of the os.path.abspath in the constructor, data_dir will + # never be an empty string. + data_dir, local = os.path.split(self.filename) + localdot = local + '.*' + + data_paths = data_paths or [data_dir] + files_to_combine = [] + for p in data_paths: + if os.path.isfile(p): + files_to_combine.append(os.path.abspath(p)) + elif os.path.isdir(p): + pattern = os.path.join(os.path.abspath(p), localdot) + files_to_combine.extend(glob.glob(pattern)) + else: + raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,)) + + if strict and not files_to_combine: + raise CoverageException("No data to combine") + + files_combined = 0 + for f in files_to_combine: + new_data = CoverageData(debug=self.debug) + try: + new_data.read_file(f) + except CoverageException as exc: + if self.warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. + self.warn(str(exc)) + else: + data.update(new_data, aliases=aliases) + files_combined += 1 + if self.debug and self.debug.should('dataio'): + self.debug.write("Deleting combined data file %r" % (f,)) + file_be_gone(f) + + if strict and not files_combined: + raise CoverageException("No usable data files") + + +def canonicalize_json_data(data): + """Canonicalize our JSON data so it can be compared.""" + for fname, lines in iitems(data.get('lines', {})): + data['lines'][fname] = sorted(lines) + for fname, arcs in iitems(data.get('arcs', {})): + data['arcs'][fname] = sorted(arcs) + + +def pretty_data(data): + """Format data as JSON, but as nicely as possible. + + Returns a string. + + """ + # Start with a basic JSON dump. + out = json.dumps(data, indent=4, sort_keys=True) + # But pairs of numbers shouldn't be split across lines... + out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out) + # Trailing spaces mess with tests, get rid of them. + out = re.sub(r"(?m)\s+$", "", out) + return out + + +def debug_main(args): + """Dump the raw data from data files. 
+ + Run this as:: + + $ python -m coverage.data [FILE] + + """ + parser = optparse.OptionParser() + parser.add_option( + "-c", "--canonical", action="store_true", + help="Sort data into a canonical order", + ) + options, args = parser.parse_args(args) + + for filename in (args or [".coverage"]): + print("--- {0} ------------------------------".format(filename)) + data = CoverageData._read_raw_data_file(filename) + if options.canonical: + canonicalize_json_data(data) + print(pretty_data(data)) + + +if __name__ == '__main__': + import sys + debug_main(sys.argv[1:]) diff --git a/env_27/lib/python2.7/site-packages/coverage/debug.py b/env_27/lib/python2.7/site-packages/coverage/debug.py new file mode 100644 index 0000000..e68736f --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/debug.py @@ -0,0 +1,295 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Control of and utilities for debugging.""" + +import contextlib +import inspect +import os +import re +import sys +try: + import _thread +except ImportError: + import thread as _thread + +from coverage.backward import StringIO +from coverage.misc import isolate_module + +os = isolate_module(os) + + +# When debugging, it can be helpful to force some options, especially when +# debugging the configuration mechanisms you usually use to control debugging! +# This is a list of forced debugging options. +FORCED_DEBUG = [] + +# A hack for debugging testing in sub-processes. +_TEST_NAME_FILE = "" # "/tmp/covtest.txt" + + +class DebugControl(object): + """Control and output for debugging.""" + + def __init__(self, options, output): + """Configure the options and output file for debugging.""" + self.options = list(options) + FORCED_DEBUG + self.raw_output = output + self.suppress_callers = False + + filters = [] + if self.should('pid'): + filters.append(add_pid_and_tid) + self.output = DebugOutputFile( + self.raw_output, + show_process=self.should('process'), + filters=filters, + ) + + def __repr__(self): + return "<DebugControl options=%r raw_output=%r>" % (self.options, self.raw_output) + + def should(self, option): + """Decide whether to output debug information in category `option`.""" + if option == "callers" and self.suppress_callers: + return False + return (option in self.options) + + @contextlib.contextmanager + def without_callers(self): + """A context manager to prevent call stacks from being logged.""" + old = self.suppress_callers + self.suppress_callers = True + try: + yield + finally: + self.suppress_callers = old + + def write(self, msg): + """Write a line of debug output. + + `msg` is the line to write. A newline will be appended. + + """ + self.output.write(msg+"\n") + if self.should('callers'): + dump_stack_frames(out=self.output, skip=1) + self.output.flush() + + +class DebugControlString(DebugControl): + """A `DebugControl` that writes to a StringIO, for testing.""" + def __init__(self, options): + super(DebugControlString, self).__init__(options, StringIO()) + + def get_output(self): + """Get the output text from the `DebugControl`.""" + return self.raw_output.getvalue() + + +def info_header(label): + """Make a nice header string.""" + return "--{0:-<60s}".format(" "+label+" ") + + +def info_formatter(info): + """Produce a sequence of formatted lines from info. + + `info` is a sequence of pairs (label, data). The produced lines are + nicely formatted, ready to print.
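+
+    For example (an editorial sketch; the label column is padded to the
+    longest label, so the exact spacing varies)::
+
+        for line in info_formatter([("version", "4.5"), ("dirs", ["a", "b"])]):
+            print(line)
+        # version: 4.5
+        #    dirs: a
+        #          b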
+ + """ + info = list(info) + if not info: + return + label_len = max(len(l) for l, _d in info) + for label, data in info: + if data == []: + data = "-none-" + if isinstance(data, (list, set, tuple)): + prefix = "%*s:" % (label_len, label) + for e in data: + yield "%*s %s" % (label_len+1, prefix, e) + prefix = "" + else: + yield "%*s: %s" % (label_len, label, data) + + +def write_formatted_info(writer, header, info): + """Write a sequence of (label,data) pairs nicely.""" + writer.write(info_header(header)) + for line in info_formatter(info): + writer.write(" %s" % line) + + +def short_stack(limit=None, skip=0): + """Return a string summarizing the call stack. + + The string is multi-line, with one line per stack frame. Each line shows + the function name, the file name, and the line number: + + ... + start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95 + import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81 + import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159 + ... + + `limit` is the number of frames to include, defaulting to all of them. + + `skip` is the number of frames to skip, so that debugging functions can + call this and not be included in the result. + + """ + stack = inspect.stack()[limit:skip:-1] + return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack) + + +def dump_stack_frames(limit=None, out=None, skip=0): + """Print a summary of the stack to stdout, or someplace else.""" + out = out or sys.stdout + out.write(short_stack(limit=limit, skip=skip+1)) + out.write("\n") + + +def short_id(id64): + """Given a 64-bit id, make a shorter 16-bit one.""" + id16 = 0 + for offset in range(0, 64, 16): + id16 ^= id64 >> offset + return id16 & 0xFFFF + + +def add_pid_and_tid(text): + """A filter to add pid and tid to debug messages.""" + # Thread ids are useful, but too long. Make a shorter one. + tid = "{0:04x}".format(short_id(_thread.get_ident())) + text = "{0:5d}.{1}: {2}".format(os.getpid(), tid, text) + return text + + +def filter_text(text, filters): + """Run `text` through a series of filters. + + `filters` is a list of functions. Each takes a string and returns a + string. Each is run in turn. + + Returns: the final string that results after all of the filters have + run. 
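+
+    For example (editorial sketch)::
+
+        filter_text("one\ntwo\n", [str.upper])   # -> "ONE\nTWO\n"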
+ + """ + clean_text = text.rstrip() + ending = text[len(clean_text):] + text = clean_text + for fn in filters: + lines = [] + for line in text.splitlines(): + lines.extend(fn(line).splitlines()) + text = "\n".join(lines) + return text + ending + + +class CwdTracker(object): # pragma: debugging + """A class to add cwd info to debug messages.""" + def __init__(self): + self.cwd = None + + def filter(self, text): + """Add a cwd message for each new cwd.""" + cwd = os.getcwd() + if cwd != self.cwd: + text = "cwd is now {0!r}\n".format(cwd) + text + self.cwd = cwd + return text + + +class DebugOutputFile(object): # pragma: debugging + """A file-like object that includes pid and cwd information.""" + def __init__(self, outfile, show_process, filters): + self.outfile = outfile + self.show_process = show_process + self.filters = list(filters) + + if self.show_process: + self.filters.append(CwdTracker().filter) + cmd = " ".join(getattr(sys, 'argv', ['???'])) + self.write("New process: executable: %s\n" % (sys.executable,)) + self.write("New process: cmd: %s\n" % (cmd,)) + if hasattr(os, 'getppid'): + self.write("New process: parent pid: %s\n" % (os.getppid(),)) + + SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one' + + @classmethod + def the_one(cls, fileobj=None, show_process=True, filters=()): + """Get the process-wide singleton DebugOutputFile. + + If it doesn't exist yet, then create it as a wrapper around the file + object `fileobj`. `show_process` controls whether the debug file adds + process-level information. + + """ + # Because of the way igor.py deletes and re-imports modules, + # this class can be defined more than once. But we really want + # a process-wide singleton. So stash it in sys.modules instead of + # on a class attribute. Yes, this is aggressively gross. + the_one = sys.modules.get(cls.SYS_MOD_NAME) + if the_one is None: + assert fileobj is not None + sys.modules[cls.SYS_MOD_NAME] = the_one = cls(fileobj, show_process, filters) + return the_one + + def write(self, text): + """Just like file.write, but filter through all our filters.""" + self.outfile.write(filter_text(text, self.filters)) + self.outfile.flush() + + def flush(self): + """Flush our file.""" + self.outfile.flush() + + +def log(msg, stack=False): # pragma: debugging + """Write a log message as forcefully as possible.""" + out = DebugOutputFile.the_one() + out.write(msg+"\n") + if stack: + dump_stack_frames(out=out, skip=1) + + +def filter_aspectlib_frames(text): # pragma: debugging + """Aspectlib prints stack traces, but includes its own frames. Scrub those out.""" + # <<< aspectlib/__init__.py:257:function_wrapper < igor.py:143:run_tests < ... + text = re.sub(r"(?<= )aspectlib/[^.]+\.py:\d+:\w+ < ", "", text) + return text + + +def enable_aspectlib_maybe(): # pragma: debugging + """For debugging, we can use aspectlib to trace execution. + + Define COVERAGE_ASPECTLIB to enable and configure aspectlib to trace + execution:: + + $ export COVERAGE_LOG=covaspect.txt + $ export COVERAGE_ASPECTLIB=coverage.Coverage:coverage.data.CoverageData + $ coverage run blah.py ... + + This will trace all the public methods on Coverage and CoverageData, + writing the information to covaspect.txt. 
+ + """ + aspects = os.environ.get("COVERAGE_ASPECTLIB", "") + if not aspects: + return + + import aspectlib # pylint: disable=import-error + import aspectlib.debug # pylint: disable=import-error + + filename = os.environ.get("COVERAGE_LOG", "/tmp/covlog.txt") + filters = [add_pid_and_tid, filter_aspectlib_frames] + aspects_file = DebugOutputFile.the_one(open(filename, "a"), show_process=True, filters=filters) + aspect_log = aspectlib.debug.log( + print_to=aspects_file, attributes=['id'], stacktrace=30, use_logging=False + ) + public_methods = re.compile(r'^(__init__|[a-zA-Z].*)$') + for aspect in aspects.split(':'): + aspectlib.weave(aspect, aspect_log, methods=public_methods) diff --git a/env_27/lib/python2.7/site-packages/coverage/env.py b/env_27/lib/python2.7/site-packages/coverage/env.py new file mode 100644 index 0000000..9f90c41 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/env.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Determine facts about the environment.""" + +import os +import platform +import sys + +# Operating systems. +WINDOWS = sys.platform == "win32" +LINUX = sys.platform == "linux2" + +# Python implementations. +PYPY = (platform.python_implementation() == 'PyPy') +if PYPY: + PYPYVERSION = sys.pypy_version_info + +JYTHON = (platform.python_implementation() == 'Jython') +IRONPYTHON = (platform.python_implementation() == 'IronPython') + +# Python versions. +PYVERSION = sys.version_info +PY2 = PYVERSION < (3, 0) +PY3 = PYVERSION >= (3, 0) + +# Python behavior +class PYBEHAVIOR(object): + """Flags indicating this Python's behavior.""" + + # When a break/continue/return statement in a try block jumps to a finally + # block, does the finally block do the break/continue/return (pre-3.8), or + # does the finally jump back to the break/continue/return (3.8) to do the + # work? + finally_jumps_back = (PYVERSION >= (3, 8)) + + # When a function is decorated, does the trace function get called for the + # @-line and also the def-line (new behavior in 3.8)? Or just the @-line + # (old behavior)? + trace_decorated_def = (PYVERSION >= (3, 8)) + + # Are while-true loops optimized into absolute jumps with no loop setup? + nix_while_true = (PYVERSION >= (3, 8)) + +# Coverage.py specifics. + +# Are we using the C-implemented trace function? +C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c' + +# Are we coverage-measuring ourselves? +METACOV = os.getenv('COVERAGE_COVERAGE', '') != '' + +# Are we running our test suite? +# Even when running tests, you can use COVERAGE_TESTING=0 to disable the +# test-specific behavior like contracts. 
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True' diff --git a/env_27/lib/python2.7/site-packages/coverage/execfile.py b/env_27/lib/python2.7/site-packages/coverage/execfile.py new file mode 100644 index 0000000..a72cb71 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/execfile.py @@ -0,0 +1,282 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Execute files of Python code.""" + +import marshal +import os +import struct +import sys +import types + +from coverage.backward import BUILTINS +from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec +from coverage.misc import CoverageException, ExceptionDuringRun, NoCode, NoSource, isolate_module +from coverage.phystokens import compile_unicode +from coverage.python import get_python_source + +os = isolate_module(os) + + +class DummyLoader(object): + """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. + + Currently only implements the .fullname attribute + """ + def __init__(self, fullname, *_args): + self.fullname = fullname + + +if importlib_util_find_spec: + def find_module(modulename): + """Find the module named `modulename`. + + Returns the file path of the module, and the name of the enclosing + package. + """ + try: + spec = importlib_util_find_spec(modulename) + except ImportError as err: + raise NoSource(str(err)) + if not spec: + raise NoSource("No module named %r" % (modulename,)) + pathname = spec.origin + packagename = spec.name + if pathname.endswith("__init__.py") and not modulename.endswith("__init__"): + mod_main = modulename + ".__main__" + spec = importlib_util_find_spec(mod_main) + if not spec: + raise NoSource( + "No module named %s; " + "%r is a package and cannot be directly executed" + % (mod_main, modulename) + ) + pathname = spec.origin + packagename = spec.name + packagename = packagename.rpartition(".")[0] + return pathname, packagename +else: + def find_module(modulename): + """Find the module named `modulename`. + + Returns the file path of the module, and the name of the enclosing + package. + """ + openfile = None + glo, loc = globals(), locals() + try: + # Search for the module - inside its parent package, if any - using + # standard import mechanics. + if '.' in modulename: + packagename, name = modulename.rsplit('.', 1) + package = __import__(packagename, glo, loc, ['__path__']) + searchpath = package.__path__ + else: + packagename, name = None, modulename + searchpath = None # "top-level search" in imp.find_module() + openfile, pathname, _ = imp.find_module(name, searchpath) + + # Complain if this is a magic non-file module. + if openfile is None and pathname is None: + raise NoSource( + "module does not live in a file: %r" % modulename + ) + + # If `modulename` is actually a package, not a mere module, then we + # pretend to be Python 2.7 and try running its __main__.py script. + if openfile is None: + packagename = modulename + name = '__main__' + package = __import__(packagename, glo, loc, ['__path__']) + searchpath = package.__path__ + openfile, pathname, _ = imp.find_module(name, searchpath) + except ImportError as err: + raise NoSource(str(err)) + finally: + if openfile: + openfile.close() + + return pathname, packagename + + +def run_python_module(modulename, args): + """Run a Python module, as though with ``python -m name args...``. + + `modulename` is the name of the module, possibly a dot-separated name. 
+ `args` is the argument array to present as sys.argv, including the first + element naming the module being executed. + + """ + pathname, packagename = find_module(modulename) + + pathname = os.path.abspath(pathname) + args[0] = pathname + # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It + # used to be an empty string (meaning the current directory). It changed + # to be the actual path to the current directory, so that os.chdir wouldn't + # affect the outcome. + if sys.version_info >= (3, 7, 0, 'beta', 3): + path0 = os.getcwd() + else: + path0 = "" + run_python_file(pathname, args, package=packagename, modulename=modulename, path0=path0) + + +def run_python_file(filename, args, package=None, modulename=None, path0=None): + """Run a Python file as if it were the main program on the command line. + + `filename` is the path to the file to execute, it need not be a .py file. + `args` is the argument array to present as sys.argv, including the first + element naming the file being executed. `package` is the name of the + enclosing package, if any. + + `modulename` is the name of the module the file was run as. + + `path0` is the value to put into sys.path[0]. If it's None, then this + function will decide on a value. + + """ + if modulename is None and sys.version_info >= (3, 3): + modulename = '__main__' + + # Create a module to serve as __main__ + old_main_mod = sys.modules['__main__'] + main_mod = types.ModuleType('__main__') + sys.modules['__main__'] = main_mod + main_mod.__file__ = filename + if package: + main_mod.__package__ = package + if modulename: + main_mod.__loader__ = DummyLoader(modulename) + + main_mod.__builtins__ = BUILTINS + + # Set sys.argv properly. + old_argv = sys.argv + sys.argv = args + + if os.path.isdir(filename): + # Running a directory means running the __main__.py file in that + # directory. + my_path0 = filename + + for ext in [".py", ".pyc", ".pyo"]: + try_filename = os.path.join(filename, "__main__" + ext) + if os.path.exists(try_filename): + filename = try_filename + break + else: + raise NoSource("Can't find '__main__' module in '%s'" % filename) + else: + my_path0 = os.path.abspath(os.path.dirname(filename)) + + # Set sys.path correctly. + old_path0 = sys.path[0] + sys.path[0] = path0 if path0 is not None else my_path0 + + try: + try: + # Make a code object somehow. + if filename.endswith((".pyc", ".pyo")): + code = make_code_from_pyc(filename) + else: + code = make_code_from_py(filename) + except CoverageException: + raise + except Exception as exc: + msg = "Couldn't run {filename!r} as Python code: {exc.__class__.__name__}: {exc}" + raise CoverageException(msg.format(filename=filename, exc=exc)) + + # Execute the code object. + try: + exec(code, main_mod.__dict__) + except SystemExit: + # The user called sys.exit(). Just pass it along to the upper + # layers, where it will be handled. + raise + except Exception: + # Something went wrong while executing the user code. + # Get the exc_info, and pack them into an exception that we can + # throw up to the outer loop. We peel one layer off the traceback + # so that the coverage.py code doesn't appear in the final printed + # traceback. + typ, err, tb = sys.exc_info() + + # PyPy3 weirdness. If I don't access __context__, then somehow it + # is non-None when the exception is reported at the upper layer, + # and a nested exception is shown to the user. This getattr fixes + # it somehow? 
https://bitbucket.org/pypy/pypy/issue/1903 + getattr(err, '__context__', None) + + # Call the excepthook. + try: + if hasattr(err, "__traceback__"): + err.__traceback__ = err.__traceback__.tb_next + sys.excepthook(typ, err, tb.tb_next) + except SystemExit: + raise + except Exception: + # Getting the output right in the case of excepthook + # shenanigans is kind of involved. + sys.stderr.write("Error in sys.excepthook:\n") + typ2, err2, tb2 = sys.exc_info() + err2.__suppress_context__ = True + if hasattr(err2, "__traceback__"): + err2.__traceback__ = err2.__traceback__.tb_next + sys.__excepthook__(typ2, err2, tb2.tb_next) + sys.stderr.write("\nOriginal exception was:\n") + raise ExceptionDuringRun(typ, err, tb.tb_next) + else: + sys.exit(1) + + finally: + # Restore the old __main__, argv, and path. + sys.modules['__main__'] = old_main_mod + sys.argv = old_argv + sys.path[0] = old_path0 + + +def make_code_from_py(filename): + """Get source from `filename` and make a code object of it.""" + # Open the source file. + try: + source = get_python_source(filename) + except (IOError, NoSource): + raise NoSource("No file to run: '%s'" % filename) + + code = compile_unicode(source, filename, "exec") + return code + + +def make_code_from_pyc(filename): + """Get a code object from a .pyc file.""" + try: + fpyc = open(filename, "rb") + except IOError: + raise NoCode("No file to run: '%s'" % filename) + + with fpyc: + # First four bytes are a version-specific magic number. It has to + # match or we won't run the file. + magic = fpyc.read(4) + if magic != PYC_MAGIC_NUMBER: + raise NoCode("Bad magic number in .pyc file") + + date_based = True + if sys.version_info >= (3, 7, 0, 'alpha', 4): + flags = struct.unpack('<L', fpyc.read(4))[0] + hash_based = flags & 0x01 + if hash_based: + fpyc.read(8) # Skip the hash. + date_based = False + if date_based: + # Skip the junk in the header that we don't need. + fpyc.read(4) # Skip the moddate. + if sys.version_info >= (3, 3): + # 3.3 added another long to the header (size), skip it. + fpyc.read(4) + + # The rest of the file is the code object we want. + code = marshal.load(fpyc) + + return code diff --git a/env_27/lib/python2.7/site-packages/coverage/files.py b/env_27/lib/python2.7/site-packages/coverage/files.py new file mode 100644 index 0000000..7ab60e4 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/files.py @@ -0,0 +1,423 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""File wrangling.""" + +import hashlib +import fnmatch +import ntpath +import os +import os.path +import posixpath +import re +import sys + +from coverage import env +from coverage.backward import unicode_class +from coverage.misc import contract, CoverageException, join_regex, isolate_module + + +os = isolate_module(os) + + +def set_relative_directory(): + """Set the directory that `relative_filename` will be relative to.""" + global RELATIVE_DIR, CANONICAL_FILENAME_CACHE + + # The absolute path to our current directory. + RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep) + + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + CANONICAL_FILENAME_CACHE = {} + + +def relative_directory(): + """Return the directory that `relative_filename` is relative to.""" + return RELATIVE_DIR + + +@contract(returns='unicode') +def relative_filename(filename): + """Return the relative form of `filename`. + + The file name will be relative to the current directory at the time + `set_relative_directory` was called.
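+
+    For instance (an editorial sketch; assumes the process started in
+    ``/proj``, so that ``RELATIVE_DIR`` is ``/proj/``)::
+
+        relative_filename(u"/proj/src/a.py")    # -> u"src/a.py"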
+ + """ + fnorm = os.path.normcase(filename) + if fnorm.startswith(RELATIVE_DIR): + filename = filename[len(RELATIVE_DIR):] + return unicode_filename(filename) + + +@contract(returns='unicode') +def canonical_filename(filename): + """Return a canonical file name for `filename`. + + An absolute path with no redundant components and normalized case. + + """ + if filename not in CANONICAL_FILENAME_CACHE: + if not os.path.isabs(filename): + for path in [os.curdir] + sys.path: + if path is None: + continue + f = os.path.join(path, filename) + try: + exists = os.path.exists(f) + except UnicodeError: + exists = False + if exists: + filename = f + break + cf = abs_file(filename) + CANONICAL_FILENAME_CACHE[filename] = cf + return CANONICAL_FILENAME_CACHE[filename] + + +MAX_FLAT = 200 + +@contract(filename='unicode', returns='unicode') +def flat_rootname(filename): + """A base for a flat file name to correspond to this file. + + Useful for writing files about the code where you want all the files in + the same directory, but need to differentiate same-named files from + different directories. + + For example, the file a/b/c.py will return 'a_b_c_py' + + """ + name = ntpath.splitdrive(filename)[1] + name = re.sub(r"[\\/.:]", "_", name) + if len(name) > MAX_FLAT: + h = hashlib.sha1(name.encode('UTF-8')).hexdigest() + name = name[-(MAX_FLAT-len(h)-1):] + '_' + h + return name + + +if env.WINDOWS: + + _ACTUAL_PATH_CACHE = {} + _ACTUAL_PATH_LIST_CACHE = {} + + def actual_path(path): + """Get the actual path of `path`, including the correct case.""" + if env.PY2 and isinstance(path, unicode_class): + path = path.encode(sys.getfilesystemencoding()) + if path in _ACTUAL_PATH_CACHE: + return _ACTUAL_PATH_CACHE[path] + + head, tail = os.path.split(path) + if not tail: + # This means head is the drive spec: normalize it. 
+ actpath = head.upper() + elif not head: + actpath = tail + else: + head = actual_path(head) + if head in _ACTUAL_PATH_LIST_CACHE: + files = _ACTUAL_PATH_LIST_CACHE[head] + else: + try: + files = os.listdir(head) + except OSError: + files = [] + _ACTUAL_PATH_LIST_CACHE[head] = files + normtail = os.path.normcase(tail) + for f in files: + if os.path.normcase(f) == normtail: + tail = f + break + actpath = os.path.join(head, tail) + _ACTUAL_PATH_CACHE[path] = actpath + return actpath + +else: + def actual_path(filename): + """The actual path for non-Windows platforms.""" + return filename + + +if env.PY2: + @contract(returns='unicode') + def unicode_filename(filename): + """Return a Unicode version of `filename`.""" + if isinstance(filename, str): + encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() + filename = filename.decode(encoding, "replace") + return filename +else: + @contract(filename='unicode', returns='unicode') + def unicode_filename(filename): + """Return a Unicode version of `filename`.""" + return filename + + +@contract(returns='unicode') +def abs_file(filename): + """Return the absolute normalized form of `filename`.""" + path = os.path.expandvars(os.path.expanduser(filename)) + try: + path = os.path.realpath(path) + except UnicodeError: + pass + path = os.path.abspath(path) + path = actual_path(path) + path = unicode_filename(path) + return path + + +RELATIVE_DIR = None +CANONICAL_FILENAME_CACHE = None +set_relative_directory() + + +def isabs_anywhere(filename): + """Is `filename` an absolute path on any OS?""" + return ntpath.isabs(filename) or posixpath.isabs(filename) + + +def prep_patterns(patterns): + """Prepare the file patterns for use in a `FnmatchMatcher`. + + If a pattern starts with a wildcard, it is used as a pattern + as-is. If it does not start with a wildcard, then it is made + absolute with the current directory. + + If `patterns` is None, an empty list is returned. + + """ + prepped = [] + for p in patterns or []: + if p.startswith(("*", "?")): + prepped.append(p) + else: + prepped.append(abs_file(p)) + return prepped + + +class TreeMatcher(object): + """A matcher for files in a tree. + + Construct with a list of paths, either files or directories. Paths match + with the `match` method if they are one of the files, or if they are + somewhere in a subtree rooted at one of the directories. + + """ + def __init__(self, paths): + self.paths = list(paths) + + def __repr__(self): + return "<TreeMatcher %r>" % self.paths + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.paths + + def match(self, fpath): + """Does `fpath` indicate a file in one of our trees?""" + for p in self.paths: + if fpath.startswith(p): + if fpath == p: + # This is the same file!
+ return True + if fpath[len(p)] == os.sep: + # This is a file in the directory + return True + return False + + +class ModuleMatcher(object): + """A matcher for modules in a tree.""" + def __init__(self, module_names): + self.modules = list(module_names) + + def __repr__(self): + return "<ModuleMatcher %r>" % (self.modules) + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.modules + + def match(self, module_name): + """Does `module_name` indicate a module in one of our packages?""" + if not module_name: + return False + + for m in self.modules: + if module_name.startswith(m): + if module_name == m: + return True + if module_name[len(m)] == '.': + # This is a module in the package + return True + + return False + + +class FnmatchMatcher(object): + """A matcher for files by file name pattern.""" + def __init__(self, pats): + self.pats = list(pats) + self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS) + + def __repr__(self): + return "<FnmatchMatcher %r>" % self.pats + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.pats + + def match(self, fpath): + """Does `fpath` match one of our file name patterns?""" + return self.re.match(fpath) is not None + + +def sep(s): + """Find the path separator used in this string, or os.sep if none.""" + sep_match = re.search(r"[\\/]", s) + if sep_match: + the_sep = sep_match.group(0) + else: + the_sep = os.sep + return the_sep + + +def fnmatches_to_regex(patterns, case_insensitive=False, partial=False): + """Convert fnmatch patterns to a compiled regex that matches any of them. + + Slashes are always converted to match either slash or backslash, for + Windows support, even when running elsewhere. + + If `partial` is true, then the pattern will match if the target string + starts with the pattern. Otherwise, it must match the entire string. + + Returns: a compiled regex object. Use the .match method to compare target + strings. + + """ + regexes = (fnmatch.translate(pattern) for pattern in patterns) + # Python3.7 fnmatch translates "/" as "/". Before that, it translates as "\/", + # so we have to deal with maybe a backslash. + regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes) + + if partial: + # fnmatch always adds a \Z to match the whole string, which we don't + # want, so we remove the \Z. While removing it, we only replace \Z if + # followed by paren (introducing flags), or at end, to keep from + # destroying a literal \Z in the pattern. + regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes) + + flags = 0 + if case_insensitive: + flags |= re.IGNORECASE + compiled = re.compile(join_regex(regexes), flags=flags) + + return compiled + + +class PathAliases(object): + """A collection of aliases for paths. + + When combining data files from remote machines, often the paths to source + code are different, for example, due to OS differences, or because of + serialized checkouts on continuous integration machines. + + A `PathAliases` object tracks a list of pattern/result pairs, and can + map a path through those aliases to produce a unified path. + + """ + def __init__(self): + self.aliases = [] + + def pprint(self): # pragma: debugging + """Dump the important parts of the PathAliases, for debugging.""" + for regex, result in self.aliases: + print("{0!r} --> {1!r}".format(regex.pattern, result)) + + def add(self, pattern, result): + """Add the `pattern`/`result` pair to the list of aliases. + + `pattern` is an `fnmatch`-style pattern.
`result` is a simple + string. When mapping paths, if a path starts with a match against + `pattern`, then that match is replaced with `result`. This models + isomorphic source trees being rooted at different places on two + different machines. + + `pattern` can't end with a wildcard component, since that would + match an entire tree, and not just its root. + + """ + if len(pattern) > 1: + pattern = pattern.rstrip(r"\/") + + # The pattern can't end with a wildcard component. + if pattern.endswith("*"): + raise CoverageException("Pattern must not end with wildcards.") + pattern_sep = sep(pattern) + + # The pattern is meant to match a filepath. Let's make it absolute + # unless it already is, or is meant to match any prefix. + if not pattern.startswith('*') and not isabs_anywhere(pattern): + pattern = abs_file(pattern) + if not pattern.endswith(pattern_sep): + pattern += pattern_sep + + # Make a regex from the pattern. + regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True) + + # Normalize the result: it must end with a path separator. + result_sep = sep(result) + result = result.rstrip(r"\/") + result_sep + self.aliases.append((regex, result)) + + def map(self, path): + """Map `path` through the aliases. + + `path` is checked against all of the patterns. The first pattern to + match is used to replace the root of the path with the result root. + Only one pattern is ever used. If no patterns match, `path` is + returned unchanged. + + The separator style in the result is made to match that of the result + in the alias. + + Returns the mapped path. If a mapping has happened, this is a + canonical path. If no mapping has happened, it is the original value + of `path` unchanged. + + """ + for regex, result in self.aliases: + m = regex.match(path) + if m: + new = path.replace(m.group(0), result) + new = new.replace(sep(path), sep(result)) + new = canonical_filename(new) + return new + return path + + +def find_python_files(dirname): + """Yield all of the importable Python files in `dirname`, recursively. + + To be importable, the files have to be in a directory with a __init__.py, + except for `dirname` itself, which isn't required to have one. The + assumption is that `dirname` was specified directly, so the user knows + best, but sub-directories are checked for a __init__.py to be sure we only + find the importable files. + + """ + for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): + if i > 0 and '__init__.py' not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue + for filename in filenames: + # We're only interested in files that look like reasonable Python + # files: Must end with .py or .pyw, and must not have certain funny + # characters that probably mean they are editor junk. + if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): + yield os.path.join(dirpath, filename) diff --git a/env_27/lib/python2.7/site-packages/coverage/fullcoverage/encodings.py b/env_27/lib/python2.7/site-packages/coverage/fullcoverage/encodings.py new file mode 100644 index 0000000..699f386 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/fullcoverage/encodings.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Imposter encodings module that installs a coverage-style tracer. 
+
+This is NOT the encodings module; it is an imposter that sets up tracing
+instrumentation and then replaces itself with the real encodings module.
+
+If the directory that holds this file is placed first in the PYTHONPATH when
+using "coverage" to run Python's tests, then this file will become the very
+first module imported by the internals of Python 3. It installs a
+coverage.py-compatible trace function that can watch Standard Library modules
+execute from the very earliest stages of Python's own boot process. This fixes
+a problem with coverage.py - that it starts too late to trace the coverage of
+many of the most fundamental modules in the Standard Library.
+
+"""
+
+import sys
+
+class FullCoverageTracer(object):
+    def __init__(self):
+        # `traces` is a list of trace events. Frames are tricky: the same
+        # frame object is used for a whole scope, with new line numbers
+        # written into it. So in one scope, all the frame objects are the
+        # same object, and will eventually all point to the last line
+        # executed. So we keep the line numbers alongside the frames.
+        # The list looks like:
+        #
+        #   traces = [
+        #       ((frame, event, arg), lineno), ...
+        #   ]
+        #
+        self.traces = []
+
+    def fullcoverage_trace(self, *args):
+        frame, event, arg = args
+        self.traces.append((args, frame.f_lineno))
+        return self.fullcoverage_trace
+
+sys.settrace(FullCoverageTracer().fullcoverage_trace)
+
+# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
+# understand why, but that use of glob borks everything if fullcoverage is in
+# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
+# there. This means when using fullcoverage, Windows path names will not be
+# their actual case.
+
+#sys.fullcoverage = True
+
+# Finally, remove our own directory from sys.path; remove ourselves from
+# sys.modules; and re-import "encodings", which will be the real package
+# this time. Note that the delete from sys.modules dictionary has to
+# happen last, since all of the symbols in this module will become None
+# at that exact moment, including "sys".
+
+parentdir = max(filter(__file__.startswith, sys.path), key=len)
+sys.path.remove(parentdir)
+del sys.modules['encodings']
+import encodings
diff --git a/env_27/lib/python2.7/site-packages/coverage/html.py b/env_27/lib/python2.7/site-packages/coverage/html.py
new file mode 100644
index 0000000..b0c6164
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/coverage/html.py
@@ -0,0 +1,447 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""HTML reporting for coverage.py."""
+
+import datetime
+import json
+import os
+import shutil
+
+import coverage
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import flat_rootname
+from coverage.misc import CoverageException, file_be_gone, Hasher, isolate_module
+from coverage.report import Reporter
+from coverage.results import Numbers
+from coverage.templite import Templite
+
+os = isolate_module(os)
+
+
+# Static files are looked for in a list of places.
+STATIC_PATH = [
+    # The place Debian puts system Javascript libraries.
+    "/usr/share/javascript",
+
+    # Our htmlfiles directory.
+    os.path.join(os.path.dirname(__file__), "htmlfiles"),
+]
+
+
+def data_filename(fname, pkgdir=""):
+    """Return the path to a data file of ours.
+
+    The file is searched for on `STATIC_PATH`, and the first place it's found
+    is returned.
+ + Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir` + is provided, at that sub-directory. + + """ + tried = [] + for static_dir in STATIC_PATH: + static_filename = os.path.join(static_dir, fname) + if os.path.exists(static_filename): + return static_filename + else: + tried.append(static_filename) + if pkgdir: + static_filename = os.path.join(static_dir, pkgdir, fname) + if os.path.exists(static_filename): + return static_filename + else: + tried.append(static_filename) + raise CoverageException( + "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried) + ) + + +def read_data(fname): + """Return the contents of a data file of ours.""" + with open(data_filename(fname)) as data_file: + return data_file.read() + + +def write_html(fname, html): + """Write `html` to `fname`, properly encoded.""" + with open(fname, "wb") as fout: + fout.write(html.encode('ascii', 'xmlcharrefreplace')) + + +class HtmlReporter(Reporter): + """HTML reporting.""" + + # These files will be copied from the htmlfiles directory to the output + # directory. + STATIC_FILES = [ + ("style.css", ""), + ("jquery.min.js", "jquery"), + ("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"), + ("jquery.hotkeys.js", "jquery-hotkeys"), + ("jquery.isonscreen.js", "jquery-isonscreen"), + ("jquery.tablesorter.min.js", "jquery-tablesorter"), + ("coverage_html.js", ""), + ("keybd_closed.png", ""), + ("keybd_open.png", ""), + ] + + def __init__(self, cov, config): + super(HtmlReporter, self).__init__(cov, config) + self.directory = None + title = self.config.html_title + if env.PY2: + title = title.decode("utf8") + self.template_globals = { + 'escape': escape, + 'pair': pair, + 'title': title, + '__url__': coverage.__url__, + '__version__': coverage.__version__, + } + self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals) + + self.coverage = cov + + self.files = [] + self.all_files_nums = [] + self.has_arcs = self.coverage.data.has_arcs() + self.status = HtmlStatus() + self.extra_css = None + self.totals = Numbers() + self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + + def report(self, morfs): + """Generate an HTML report for `morfs`. + + `morfs` is a list of modules or file names. + + """ + assert self.config.html_dir, "must give a directory for html reporting" + + # Read the status data. + self.status.read(self.config.html_dir) + + # Check that this run used the same settings as the last run. + m = Hasher() + m.update(self.config) + these_settings = m.hexdigest() + if self.status.settings_hash() != these_settings: + self.status.reset() + self.status.set_settings_hash(these_settings) + + # The user may have extra CSS they want copied. + if self.config.extra_css: + self.extra_css = os.path.basename(self.config.extra_css) + + # Process all the files. + self.report_files(self.html_file, morfs, self.config.html_dir) + + if not self.all_files_nums: + raise CoverageException("No data to report.") + + # Write the index file. + self.index_file() + + self.make_local_static_report_files() + return self.totals.n_statements and self.totals.pc_covered + + def make_local_static_report_files(self): + """Make local instances of static files for HTML report.""" + # The files we provide must always be copied. + for static, pkgdir in self.STATIC_FILES: + shutil.copyfile( + data_filename(static, pkgdir), + os.path.join(self.directory, static) + ) + + # The user may have extra CSS they want copied. 
+        if self.extra_css:
+            shutil.copyfile(
+                self.config.extra_css,
+                os.path.join(self.directory, self.extra_css)
+            )
+
+    def file_hash(self, source, fr):
+        """Compute a hash that changes if the file needs to be re-reported."""
+        m = Hasher()
+        m.update(source)
+        self.coverage.data.add_to_hash(fr.filename, m)
+        return m.hexdigest()
+
+    def html_file(self, fr, analysis):
+        """Generate an HTML file for one source file."""
+        rootname = flat_rootname(fr.relative_filename())
+        html_filename = rootname + ".html"
+        html_path = os.path.join(self.directory, html_filename)
+
+        # Get the numbers for this file.
+        nums = analysis.numbers
+        self.all_files_nums.append(nums)
+
+        if self.config.skip_covered:
+            # Don't report on 100% files.
+            no_missing_lines = (nums.n_missing == 0)
+            no_missing_branches = (nums.n_partial_branches == 0)
+            if no_missing_lines and no_missing_branches:
+                # If there's an existing file, remove it.
+                file_be_gone(html_path)
+                return
+
+        source = fr.source()
+
+        # Find out if the file on disk is already correct.
+        this_hash = self.file_hash(source.encode('utf-8'), fr)
+        that_hash = self.status.file_hash(rootname)
+        if this_hash == that_hash:
+            # Nothing has changed to require the file to be reported again.
+            self.files.append(self.status.index_info(rootname))
+            return
+
+        self.status.set_file_hash(rootname, this_hash)
+
+        if self.has_arcs:
+            missing_branch_arcs = analysis.missing_branch_arcs()
+            arcs_executed = analysis.arcs_executed()
+
+        # These classes determine which lines are highlighted by default.
+        c_run = "run hide_run"
+        c_exc = "exc"
+        c_mis = "mis"
+        c_par = "par " + c_run
+
+        lines = []
+
+        for lineno, line in enumerate(fr.source_token_lines(), start=1):
+            # Figure out how to mark this line.
+            line_class = []
+            annotate_html = ""
+            annotate_long = ""
+            if lineno in analysis.statements:
+                line_class.append("stm")
+            if lineno in analysis.excluded:
+                line_class.append(c_exc)
+            elif lineno in analysis.missing:
+                line_class.append(c_mis)
+            elif self.has_arcs and lineno in missing_branch_arcs:
+                line_class.append(c_par)
+                shorts = []
+                longs = []
+                for b in missing_branch_arcs[lineno]:
+                    if b < 0:
+                        shorts.append("exit")
+                    else:
+                        shorts.append(b)
+                    longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
+                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)
+
+                if len(longs) == 1:
+                    annotate_long = longs[0]
+                else:
+                    annotate_long = "%d missed branches: %s" % (
+                        len(longs),
+                        ", ".join("%d) %s" % (num, ann_long)
+                                  for num, ann_long in enumerate(longs, start=1)),
+                    )
+            elif lineno in analysis.statements:
+                line_class.append(c_run)
+
+            # Build the HTML for the line.
+            html = []
+            for tok_type, tok_text in line:
+                if tok_type == "ws":
+                    html.append(escape(tok_text))
+                else:
+                    tok_html = escape(tok_text) or '&nbsp;'
+                    html.append(
+                        '<span class="%s">%s</span>' % (tok_type, tok_html)
+                    )
+
+            lines.append({
+                'html': ''.join(html),
+                'number': lineno,
+                'class': ' '.join(line_class) or "pln",
+                'annotate': annotate_html,
+                'annotate_long': annotate_long,
+            })
+
+        # Write the HTML page for this file.
+        html = self.source_tmpl.render({
+            'c_exc': c_exc,
+            'c_mis': c_mis,
+            'c_par': c_par,
+            'c_run': c_run,
+            'has_arcs': self.has_arcs,
+            'extra_css': self.extra_css,
+            'fr': fr,
+            'nums': nums,
+            'lines': lines,
+            'time_stamp': self.time_stamp,
+        })
+
+        write_html(html_path, html)
+
+        # Save this file's information for the index file.
+        index_info = {
+            'nums': nums,
+            'html_filename': html_filename,
+            'relative_filename': fr.relative_filename(),
+        }
+        self.files.append(index_info)
+        self.status.set_index_info(rootname, index_info)
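
The early return in `html_file` above is a plain digest comparison. A minimal sketch of the same skip-if-unchanged idea, using `hashlib.md5` as a stand-in for coverage's `Hasher` and made-up inputs:

    import hashlib

    def page_fingerprint(source_bytes, coverage_data):
        # Hash the source plus the coverage data that feeds the page; if
        # neither changed, the HTML already on disk is still correct.
        h = hashlib.md5()
        h.update(source_bytes)
        h.update(repr(coverage_data).encode("utf-8"))
        return h.hexdigest()

    previous = page_fingerprint(b"print('hi')\n", [1])
    current = page_fingerprint(b"print('hi')\n", [1])
    if current == previous:
        print("skip: nothing changed")   # mirrors the early return above

The real `Hasher` also folds the config into a separate settings hash (see `report()` above), so changed report settings invalidate every page at once rather than per file.
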
+
+    def index_file(self):
+        """Write the index.html file for this report."""
+        index_tmpl = Templite(read_data("index.html"), self.template_globals)
+
+        self.totals = sum(self.all_files_nums)
+
+        html = index_tmpl.render({
+            'has_arcs': self.has_arcs,
+            'extra_css': self.extra_css,
+            'files': self.files,
+            'totals': self.totals,
+            'time_stamp': self.time_stamp,
+        })
+
+        write_html(os.path.join(self.directory, "index.html"), html)
+
+        # Write the latest hashes for next time.
+        self.status.write(self.directory)
+
+
+class HtmlStatus(object):
+    """The status information we keep to support incremental reporting."""
+
+    STATUS_FILE = "status.json"
+    STATUS_FORMAT = 1
+
+    # pylint: disable=wrong-spelling-in-comment,useless-suppression
+    # The data looks like:
+    #
+    # {
+    #     'format': 1,
+    #     'settings': '540ee119c15d52a68a53fe6f0897346d',
+    #     'version': '4.0a1',
+    #     'files': {
+    #         'cogapp___init__': {
+    #             'hash': 'e45581a5b48f879f301c0f30bf77a50c',
+    #             'index': {
+    #                 'html_filename': 'cogapp___init__.html',
+    #                 'name': 'cogapp/__init__',
+    #                 'nums': <coverage.results.Numbers instance>,
+    #             }
+    #         },
+    #         ...
+    #         'cogapp_whiteutils': {
+    #             'hash': '8504bb427fc488c4176809ded0277d51',
+    #             'index': {
+    #                 'html_filename': 'cogapp_whiteutils.html',
+    #                 'name': 'cogapp/whiteutils',
+    #                 'nums': <coverage.results.Numbers instance>,
+    #             }
+    #         },
+    #     },
+    # }
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        """Initialize to empty."""
+        self.settings = ''
+        self.files = {}
+
+    def read(self, directory):
+        """Read the last status in `directory`."""
+        usable = False
+        try:
+            status_file = os.path.join(directory, self.STATUS_FILE)
+            with open(status_file, "r") as fstatus:
+                status = json.load(fstatus)
+        except (IOError, ValueError):
+            usable = False
+        else:
+            usable = True
+            if status['format'] != self.STATUS_FORMAT:
+                usable = False
+            elif status['version'] != coverage.__version__:
+                usable = False
+
+        if usable:
+            self.files = {}
+            for filename, fileinfo in iitems(status['files']):
+                fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
+                self.files[filename] = fileinfo
+            self.settings = status['settings']
+        else:
+            self.reset()
+
+    def write(self, directory):
+        """Write the current status to `directory`."""
+        status_file = os.path.join(directory, self.STATUS_FILE)
+        files = {}
+        for filename, fileinfo in iitems(self.files):
+            fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
+            files[filename] = fileinfo
+
+        status = {
+            'format': self.STATUS_FORMAT,
+            'version': coverage.__version__,
+            'settings': self.settings,
+            'files': files,
+        }
+        with open(status_file, "w") as fout:
+            json.dump(status, fout, separators=(',', ':'))
+
+        # Older versions of ShiningPanda look for the old name, status.dat.
+        # Accommodate them if we are running under Jenkins.
+        # https://issues.jenkins-ci.org/browse/JENKINS-28428
+        if "JENKINS_URL" in os.environ:
+            with open(os.path.join(directory, "status.dat"), "w") as dat:
+                dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
+
+    def settings_hash(self):
+        """Get the hash of the coverage.py settings."""
+        return self.settings
+
+    def set_settings_hash(self, settings):
+        """Set the hash of the coverage.py settings."""
+        self.settings = settings
+
+    def file_hash(self, fname):
+        """Get the hash of `fname`'s contents."""
+        return self.files.get(fname, {}).get('hash', '')
+
+    def set_file_hash(self, fname, val):
+        """Set the hash of `fname`'s contents."""
+        self.files.setdefault(fname, {})['hash'] = val
+
+    def index_info(self, fname):
+        """Get the information for index.html for `fname`."""
+        return self.files.get(fname, {}).get('index', {})
+
+    def set_index_info(self, fname, info):
+        """Set the information for index.html for `fname`."""
+        self.files.setdefault(fname, {})['index'] = info
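
The `status.json` layout documented in the comment block above can be exercised round-trip with nothing but `json`; a sketch with made-up hash and version values:

    import json

    status = {
        "format": 1,                                     # HtmlStatus.STATUS_FORMAT
        "settings": "540ee119c15d52a68a53fe6f0897346d",  # made-up settings hash
        "version": "4.0a1",                              # must equal coverage.__version__
        "files": {},
    }

    blob = json.dumps(status, separators=(",", ":"))     # same separators as write()
    loaded = json.loads(blob)
    # Mirrors the usability checks in HtmlStatus.read().
    usable = loaded["format"] == 1 and loaded["version"] == "4.0a1"
    print(usable)  # True

The `Numbers(*...)` / `init_args()` pair in `read()` and `write()` is what converts the `nums` entries between objects and JSON-safe lists.
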
+
+
+# Helpers for templates and generating HTML
+
+def escape(t):
+    """HTML-escape the text in `t`.
+
+    This is only suitable for HTML text, not attributes.
+
+    """
+    # Convert HTML special chars into HTML entities.
+    return t.replace("&", "&amp;").replace("<", "&lt;")
+
+
+def pair(ratio):
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "%s %s" % ratio
diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/coverage_html.js b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 0000000..f6f5de2
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,584 @@
+// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+// For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
+/*global coverage: true, document, window, $ */
+
+coverage = {};
+
+// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
+coverage.assign_shortkeys = function () {
+    $("*[class*='shortkey_']").each(function (i, e) {
+        $.each($(e).attr("class").split(" "), function (i, c) {
+            if (/^shortkey_/.test(c)) {
+                $(document).bind('keydown', c.substr(9), function () {
+                    $(e).click();
+                });
+            }
+        });
+    });
+};
+
+// Create the events for the help panel.
+coverage.wire_up_help_panel = function () {
+    $("#keyboard_icon").click(function () {
+        // Show the help panel, and position it so the keyboard icon in the
+        // panel is in the same place as the keyboard icon in the header.
+        $(".help_panel").show();
+        var koff = $("#keyboard_icon").offset();
+        var poff = $("#panel_icon").position();
+        $(".help_panel").offset({
+            top: koff.top-poff.top,
+            left: koff.left-poff.left
+        });
+    });
+    $("#panel_icon").click(function () {
+        $(".help_panel").hide();
+    });
+};
+
+// Create the events for the filter box.
+coverage.wire_up_filter = function () {
+    // Cache elements.
+    var table = $("table.index");
+    var table_rows = table.find("tbody tr");
+    var table_row_names = table_rows.find("td.name a");
+    var no_rows = $("#no_rows");
+
+    // Create a duplicate table footer that we can modify with dynamic summed values.
+ var table_footer = $("table.index tfoot tr"); + var table_dynamic_footer = table_footer.clone(); + table_dynamic_footer.attr('class', 'total_dynamic hidden'); + table_footer.after(table_dynamic_footer); + + // Observe filter keyevents. + $("#filter").on("keyup change", $.debounce(150, function (event) { + var filter_value = $(this).val(); + + if (filter_value === "") { + // Filter box is empty, remove all filtering. + table_rows.removeClass("hidden"); + + // Show standard footer, hide dynamic footer. + table_footer.removeClass("hidden"); + table_dynamic_footer.addClass("hidden"); + + // Hide placeholder, show table. + if (no_rows.length > 0) { + no_rows.hide(); + } + table.show(); + + } + else { + // Filter table items by value. + var hidden = 0; + var shown = 0; + + // Hide / show elements. + $.each(table_row_names, function () { + var element = $(this).parents("tr"); + + if ($(this).text().indexOf(filter_value) === -1) { + // hide + element.addClass("hidden"); + hidden++; + } + else { + // show + element.removeClass("hidden"); + shown++; + } + }); + + // Show placeholder if no rows will be displayed. + if (no_rows.length > 0) { + if (shown === 0) { + // Show placeholder, hide table. + no_rows.show(); + table.hide(); + } + else { + // Hide placeholder, show table. + no_rows.hide(); + table.show(); + } + } + + // Manage dynamic header: + if (hidden > 0) { + // Calculate new dynamic sum values based on visible rows. + for (var column = 2; column < 20; column++) { + // Calculate summed value. + var cells = table_rows.find('td:nth-child(' + column + ')'); + if (!cells.length) { + // No more columns...! + break; + } + + var sum = 0, numer = 0, denom = 0; + $.each(cells.filter(':visible'), function () { + var ratio = $(this).data("ratio"); + if (ratio) { + var splitted = ratio.split(" "); + numer += parseInt(splitted[0], 10); + denom += parseInt(splitted[1], 10); + } + else { + sum += parseInt(this.innerHTML, 10); + } + }); + + // Get footer cell element. + var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')'); + + // Set value into dynamic footer cell element. + if (cells[0].innerHTML.indexOf('%') > -1) { + // Percentage columns use the numerator and denominator, + // and adapt to the number of decimal places. + var match = /\.([0-9]+)/.exec(cells[0].innerHTML); + var places = 0; + if (match) { + places = match[1].length; + } + var pct = numer * 100 / denom; + footer_cell.text(pct.toFixed(places) + '%'); + } + else { + footer_cell.text(sum); + } + } + + // Hide standard footer, show dynamic footer. + table_footer.addClass("hidden"); + table_dynamic_footer.removeClass("hidden"); + } + else { + // Show standard footer, hide dynamic footer. + table_footer.removeClass("hidden"); + table_dynamic_footer.addClass("hidden"); + } + } + })); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). 
+ $("#filter").trigger("change"); +}; + +// Loaded on index.html +coverage.index_ready = function ($) { + // Look for a cookie containing previous sort settings: + var sort_list = []; + var cookie_name = "COVERAGE_INDEX_SORT"; + var i; + + // This almost makes it worth installing the jQuery cookie plugin: + if (document.cookie.indexOf(cookie_name) > -1) { + var cookies = document.cookie.split(";"); + for (i = 0; i < cookies.length; i++) { + var parts = cookies[i].split("="); + + if ($.trim(parts[0]) === cookie_name && parts[1]) { + sort_list = eval("[[" + parts[1] + "]]"); + break; + } + } + } + + // Create a new widget which exists only to save and restore + // the sort order: + $.tablesorter.addWidget({ + id: "persistentSort", + + // Format is called by the widget before displaying: + format: function (table) { + if (table.config.sortList.length === 0 && sort_list.length > 0) { + // This table hasn't been sorted before - we'll use + // our stored settings: + $(table).trigger('sorton', [sort_list]); + } + else { + // This is not the first load - something has + // already defined sorting so we'll just update + // our stored value to match: + sort_list = table.config.sortList; + } + } + }); + + // Configure our tablesorter to handle the variable number of + // columns produced depending on report options: + var headers = []; + var col_count = $("table.index > thead > tr > th").length; + + headers[0] = { sorter: 'text' }; + for (i = 1; i < col_count-1; i++) { + headers[i] = { sorter: 'digit' }; + } + headers[col_count-1] = { sorter: 'percent' }; + + // Enable the table sorter: + $("table.index").tablesorter({ + widgets: ['persistentSort'], + headers: headers + }); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + coverage.wire_up_filter(); + + // Watch for page unload events so we can save the final sort settings: + $(window).unload(function () { + document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/"; + }); +}; + +// -- pyfile stuff -- + +coverage.pyfile_ready = function ($) { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === 'n') { + $(frag).addClass('highlight'); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + $(document) + .bind('keydown', 'j', coverage.to_next_chunk_nicely) + .bind('keydown', 'k', coverage.to_prev_chunk_nicely) + .bind('keydown', '0', coverage.to_top) + .bind('keydown', '1', coverage.to_first_chunk) + ; + + $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");}); + $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");}); + $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");}); + $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");}); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + + coverage.init_scroll_markers(); + + // Rebuild scroll markers after window high changing + $(window).resize(coverage.resize_scroll_markers); +}; + +coverage.toggle_lines = function (btn, cls) { + btn = $(btn); + var hide = "hide_"+cls; + if (btn.hasClass(hide)) { + $("#source ."+cls).removeClass(hide); + btn.removeClass(hide); + } + else { + $("#source ."+cls).addClass(hide); + btn.addClass(hide); + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return $("#t" + n); +}; + +// Return the nth line number div. 
+coverage.num_elt = function (n) { + return $("#n" + n); +}; + +// Return the container of all the code. +coverage.code_container = function () { + return $(".linenos"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.is_transparent = function (color) { + // Different browsers return different colors for "none". + return color === "transparent" || color === "rgba(0, 0, 0, 0)"; +}; + +coverage.to_next_chunk = function () { + var c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var color, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + color = probe_line.css("background-color"); + if (!c.is_transparent(color)) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_color = color; + while (next_color === color) { + probe++; + probe_line = c.line_elt(probe); + next_color = probe_line.css("background-color"); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + var c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var color = probe_line.css("background-color"); + while (probe > 0 && c.is_transparent(color)) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + color = probe_line.css("background-color"); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. + var prev_color = color; + while (prev_color === color) { + probe--; + probe_line = c.line_elt(probe); + prev_color = probe_line.css("background-color"); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Return the line number of the line nearest pixel position pos +coverage.line_at_pos = function (pos) { + var l1 = coverage.line_elt(1), + l2 = coverage.line_elt(2), + result; + if (l1.length && l2.length) { + var l1_top = l1.offset().top, + line_height = l2.offset().top - l1_top, + nlines = (pos - l1_top) / line_height; + if (nlines < 1) { + result = 1; + } + else { + result = Math.ceil(nlines); + } + } + else { + result = 1; + } + return result; +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + var top = coverage.line_elt(coverage.sel_begin); + var next = coverage.line_elt(coverage.sel_end-1); + + return ( + (top.isOnScreen() ? 1 : 0) + + (next.isOnScreen() ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: select the top line on + // the screen. 
+        var win = $(window);
+        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
+    }
+    coverage.to_next_chunk();
+};
+
+coverage.to_prev_chunk_nicely = function () {
+    coverage.finish_scrolling();
+    if (coverage.selection_ends_on_screen() === 0) {
+        var win = $(window);
+        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
+    }
+    coverage.to_prev_chunk();
+};
+
+// Select line number lineno, or if it is in a colored chunk, select the
+// entire chunk
+coverage.select_line_or_chunk = function (lineno) {
+    var c = coverage;
+    var probe_line = c.line_elt(lineno);
+    if (probe_line.length === 0) {
+        return;
+    }
+    var the_color = probe_line.css("background-color");
+    if (!c.is_transparent(the_color)) {
+        // The line is in a highlighted chunk.
+        // Search backward for the first line.
+        var probe = lineno;
+        var color = the_color;
+        while (probe > 0 && color === the_color) {
+            probe--;
+            probe_line = c.line_elt(probe);
+            if (probe_line.length === 0) {
+                break;
+            }
+            color = probe_line.css("background-color");
+        }
+        var begin = probe + 1;
+
+        // Search forward for the last line.
+        probe = lineno;
+        color = the_color;
+        while (color === the_color) {
+            probe++;
+            probe_line = c.line_elt(probe);
+            color = probe_line.css("background-color");
+        }
+
+        coverage.set_sel(begin, probe);
+    }
+    else {
+        coverage.set_sel(lineno);
+    }
+};
+
+coverage.show_selection = function () {
+    var c = coverage;
+
+    // Highlight the lines in the chunk
+    c.code_container().find(".highlight").removeClass("highlight");
+    for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
+        c.num_elt(probe).addClass("highlight");
+    }
+
+    c.scroll_to_selection();
+};
+
+coverage.scroll_to_selection = function () {
+    // Scroll the page if the chunk isn't fully visible.
+    if (coverage.selection_ends_on_screen() < 2) {
+        // Need to move the page. The html,body trick makes it scroll in all
+        // browsers, got it from http://stackoverflow.com/questions/3042651
+        var top = coverage.line_elt(coverage.sel_begin);
+        var top_pos = parseInt(top.offset().top, 10);
+        coverage.scroll_window(top_pos - 30);
+    }
+};
+
+coverage.scroll_window = function (to_pos) {
+    $("html,body").animate({scrollTop: to_pos}, 200);
+};
+
+coverage.finish_scrolling = function () {
+    $("html,body").stop(true, true);
+};
+
+coverage.init_scroll_markers = function () {
+    var c = coverage;
+    // Init some variables
+    c.lines_len = $('td.text p').length;
+    c.body_h = $('body').height();
+    c.header_h = $('div#header').height();
+    c.missed_lines = $('td.text p.mis, td.text p.par');
+
+    // Build html
+    c.resize_scroll_markers();
+};
+
+coverage.resize_scroll_markers = function () {
+    var c = coverage,
+        min_line_height = 3,
+        max_line_height = 10,
+        visible_window_h = $(window).height();
+
+    $('#scroll_marker').remove();
+    // Don't build markers if the window has no scroll bar.
+    if (c.body_h <= visible_window_h) {
+        return;
+    }
+
+    $("body").append("<div id='scroll_marker'>&nbsp;</div>");
+    var scroll_marker = $('#scroll_marker'),
+        marker_scale = scroll_marker.height() / c.body_h,
+        line_height = scroll_marker.height() / c.lines_len;
+
+    // Line height must be between the extremes.
+    if (line_height > min_line_height) {
+        if (line_height > max_line_height) {
+            line_height = max_line_height;
+        }
+    }
+    else {
+        line_height = min_line_height;
+    }
+
+    var previous_line = -99,
+        last_mark,
+        last_top;
+
+    c.missed_lines.each(function () {
+        var line_top = Math.round($(this).offset().top * marker_scale),
+            id_name = $(this).attr('id'),
+            line_number = parseInt(id_name.substring(1, id_name.length));
+
+        if (line_number === previous_line + 1) {
+            // If this solid missed block just make previous mark higher.
+            last_mark.css({
+                'height': line_top + line_height - last_top
+            });
+        }
+        else {
+            // Add colored line in scroll_marker block.
+            scroll_marker.append('<div id="m' + line_number + '"></div>');
+            last_mark = $('#m' + line_number);
+            last_mark.css({
+                'height': line_height,
+                'top': line_top
+            });
+            last_top = line_top;
+        }
+
+        previous_line = line_number;
+    });
+};
diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/index.html b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/index.html
new file mode 100644
index 0000000..1e3999f
--- /dev/null
+++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/index.html
@@ -0,0 +1,118 @@
+{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
+{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
+<!DOCTYPE html>
+<html>
+<head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    <title>{{ title|escape }}</title>
+    <link rel="stylesheet" href="style.css" type="text/css">
+    {% if extra_css %}
+        <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
+    {% endif %}
+    <script type="text/javascript" src="jquery.min.js"></script>
+    <script type="text/javascript" src="jquery.ba-throttle-debounce.min.js"></script>
+    <script type="text/javascript" src="jquery.tablesorter.min.js"></script>
+    <script type="text/javascript" src="jquery.hotkeys.js"></script>
+    <script type="text/javascript" src="coverage_html.js"></script>
+    <script type="text/javascript">
+        jQuery(document).ready(coverage.index_ready);
+    </script>
+</head>
+<body class="indexfile">
+
+<div id="header">
+    <div class="content">
+        <h1>{{ title|escape }}:
+            <span class="pc_cov">{{totals.pc_covered_str}}%</span>
+        </h1>
+        <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
+        <form id="filter_container">
+            <input id="filter" type="text" value="" placeholder="filter..." />
+        </form>
+    </div>
+</div>
+
+<div class="help_panel">
+    <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
+    <p class="legend">Hot-keys on this page</p>
+    <div>
+        <p class="keyhelp">
+            <span class="key">n</span>
+            <span class="key">s</span>
+            <span class="key">m</span>
+            <span class="key">x</span>
+            {% if has_arcs %}
+            <span class="key">b</span>
+            <span class="key">p</span>
+            {% endif %}
+            <span class="key">c</span> &nbsp; change column sorting
+        </p>
+    </div>
+</div>
+
+<div id="index">
+    <table class="index">
+        <thead>
+            {# The title="" attr doesn"t work in Safari. #}
+            <tr class="tablehead" title="Click to sort">
+                <th class="name left headerSortDown shortkey_n">Module</th>
+                <th class="shortkey_s">statements</th>
+                <th class="shortkey_m">missing</th>
+                <th class="shortkey_x">excluded</th>
+                {% if has_arcs %}
+                <th class="shortkey_b">branches</th>
+                <th class="shortkey_p">partial</th>
+                {% endif %}
+                <th class="right shortkey_c">coverage</th>
+            </tr>
+        </thead>
+        {# HTML syntax requires thead, tfoot, tbody #}
+        <tfoot>
+            <tr class="total">
+                <td class="name left">Total</td>
+                <td>{{totals.n_statements}}</td>
+                <td>{{totals.n_missing}}</td>
+                <td>{{totals.n_excluded}}</td>
+                {% if has_arcs %}
+                <td>{{totals.n_branches}}</td>
+                <td>{{totals.n_partial_branches}}</td>
+                {% endif %}
+                <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
+            </tr>
+        </tfoot>
+        <tbody>
+            {% for file in files %}
+            <tr class="file">
+                <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
+                <td>{{file.nums.n_statements}}</td>
+                <td>{{file.nums.n_missing}}</td>
+                <td>{{file.nums.n_excluded}}</td>
+                {% if has_arcs %}
+                <td>{{file.nums.n_branches}}</td>
+                <td>{{file.nums.n_partial_branches}}</td>
+                {% endif %}
+                <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
+            </tr>
+            {% endfor %}
+        </tbody>
+    </table>
+
+    <p id="no_rows">
+        No items found using the specified filter.
+    </p>
+</div>
+
+<div id="footer">
+    <div class="content">
+        <p>
+            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
+            created at {{ time_stamp }}
+        </p>
+    </div>
+</div>
+
+</body>
+</html>
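
A note on the `data-ratio` attributes in the rows above: they carry the raw numerator and denominator that `coverage_html.js` re-sums when the filter box hides rows, and they are produced by the `pair()` helper from html.py earlier in this diff. A quick self-contained check of that contract:

    # pair() as defined in html.py above; the split-and-sum mirrors what
    # the filter code in coverage_html.js does with each cell's data-ratio.
    def pair(ratio):
        """Format a pair of numbers so JavaScript can read them in an attribute."""
        return "%s %s" % ratio

    numer, denom = (int(x) for x in pair((18, 20)).split(" "))
    print("%.1f%%" % (numer * 100.0 / denom))   # 90.0%
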
+ + + + + diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js new file mode 100644 index 0000000..648fe5d --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js @@ -0,0 +1,9 @@ +/* + * jQuery throttle / debounce - v1.1 - 3/7/2010 + * http://benalman.com/projects/jquery-throttle-debounce-plugin/ + * + * Copyright (c) 2010 "Cowboy" Ben Alman + * Dual licensed under the MIT and GPL licenses. + * http://benalman.com/about/license/ + */ +(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this); diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.hotkeys.js b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.hotkeys.js new file mode 100644 index 0000000..09b21e0 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.hotkeys.js @@ -0,0 +1,99 @@ +/* + * jQuery Hotkeys Plugin + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. + * + * Based upon the plugin by Tzury Bar Yochay: + * http://github.com/tzuryby/hotkeys + * + * Original idea by: + * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/ +*/ + +(function(jQuery){ + + jQuery.hotkeys = { + version: "0.8", + + specialKeys: { + 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause", + 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home", + 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del", + 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7", + 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/", + 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8", + 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta" + }, + + shiftNums: { + "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&", + "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<", + ".": ">", "/": "?", "\\": "|" + } + }; + + function keyHandler( handleObj ) { + // Only care when a possible input has been specified + if ( typeof handleObj.data !== "string" ) { + return; + } + + var origHandler = handleObj.handler, + keys = handleObj.data.toLowerCase().split(" "); + + handleObj.handler = function( event ) { + // Don't fire in text-accepting inputs that we didn't directly bind to + if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) || + event.target.type === "text") ) { + return; + } + + // Keypress represents characters, not special keys + var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ], + character = String.fromCharCode( event.which ).toLowerCase(), + key, modif = "", possible = {}; + + // check combinations (alt|ctrl|shift+anything) + if ( event.altKey && special !== "alt" ) { + modif += "alt+"; + } + + if ( event.ctrlKey && special !== "ctrl" 
) { + modif += "ctrl+"; + } + + // TODO: Need to make sure this works consistently across platforms + if ( event.metaKey && !event.ctrlKey && special !== "meta" ) { + modif += "meta+"; + } + + if ( event.shiftKey && special !== "shift" ) { + modif += "shift+"; + } + + if ( special ) { + possible[ modif + special ] = true; + + } else { + possible[ modif + character ] = true; + possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true; + + // "$" can be triggered as "Shift+4" or "Shift+$" or just "$" + if ( modif === "shift+" ) { + possible[ jQuery.hotkeys.shiftNums[ character ] ] = true; + } + } + + for ( var i = 0, l = keys.length; i < l; i++ ) { + if ( possible[ keys[i] ] ) { + return origHandler.apply( this, arguments ); + } + } + }; + } + + jQuery.each([ "keydown", "keyup", "keypress" ], function() { + jQuery.event.special[ this ] = { add: keyHandler }; + }); + +})( jQuery ); diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.isonscreen.js b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.isonscreen.js new file mode 100644 index 0000000..0182ebd --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.isonscreen.js @@ -0,0 +1,53 @@ +/* Copyright (c) 2010 + * @author Laurence Wheway + * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php) + * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses. + * + * @version 1.2.0 + */ +(function($) { + jQuery.extend({ + isOnScreen: function(box, container) { + //ensure numbers come in as intgers (not strings) and remove 'px' is it's there + for(var i in box){box[i] = parseFloat(box[i])}; + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( box.left+box.width-container.left > 0 && + box.left < container.width+container.left && + box.top+box.height-container.top > 0 && + box.top < container.height+container.top + ) return true; + return false; + } + }) + + + jQuery.fn.isOnScreen = function (container) { + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( $(this).offset().left+$(this).width()-container.left > 0 && + $(this).offset().left < container.width+container.left && + $(this).offset().top+$(this).height()-container.top > 0 && + $(this).offset().top < container.height+container.top + ) return true; + return false; + } +})(jQuery); diff --git a/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.min.js b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.min.js new file mode 100644 index 0000000..d1608e3 --- /dev/null +++ b/env_27/lib/python2.7/site-packages/coverage/htmlfiles/jquery.min.js @@ -0,0 +1,4 @@ +/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. 
| jquery.org/license */ +!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="
",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 
[Vendored third-party asset omitted: minified jQuery 1.x (Sizzle selector engine, Callbacks/Deferred, data/queue, event, and DOM-manipulation modules), added as a new file along with the committed env_27/ virtualenv. The minified source is not meaningfully reviewable, and its embedded HTML string literals did not survive extraction, so only this identification is kept.]