mirror of
https://github.com/thegeeklab/ansible-later.git
synced 2024-11-25 22:30:42 +00:00
fixes for package include data
This commit is contained in:
parent
051d132ea2
commit
bdc386115a
@ -1,3 +1,3 @@
|
||||
include README.md
|
||||
include LICENSE
|
||||
recursive-include ansiblelater/data
|
||||
recursive-include ansiblelater/data *
|
||||
|
2
setup.py
2
setup.py
@ -37,7 +37,7 @@ setup(
|
||||
long_description=get_readme(),
|
||||
long_description_content_type="text/markdown",
|
||||
packages=find_packages(exclude=["tests", "tests.*"]),
|
||||
# package_data={'ansiblelater': ['data/*']},
|
||||
package_data={'ansiblelater': ['data/*']},
|
||||
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,,!=3.4.*",
|
||||
classifiers=[
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
|
10
testenv/bin/ansible-later
Executable file
10
testenv/bin/ansible-later
Executable file
@ -0,0 +1,10 @@
|
||||
#!/Users/rkau2905/Devel/python/private/ansible-later/testenv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the "ansible-later"
# entry point; it only delegates to ansiblelater.__main__:main.

import re
import sys

from ansiblelater.__main__ import main

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" or ".exe" suffix from argv[0] so
    # usage/help output shows the plain command name on every platform.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
@ -0,0 +1 @@
|
||||
pip
|
@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2018 Robert Kaussow
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
@ -0,0 +1,395 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: ansible-later
|
||||
Version: 0.2.4
|
||||
Summary: Reviews ansible playbooks, roles and inventories and suggests improvements.
|
||||
Home-page: https://github.com/xoxys/ansible-later
|
||||
Author: Robert Kaussow
|
||||
Author-email: mail@geeklabor.de
|
||||
License: MIT
|
||||
Keywords: ansible code review
|
||||
Platform: UNKNOWN
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Console
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Intended Audience :: Information Technology
|
||||
Classifier: Intended Audience :: System Administrators
|
||||
Classifier: Natural Language :: English
|
||||
Classifier: Operating System :: POSIX
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Topic :: Utilities
|
||||
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*
|
||||
Description-Content-Type: text/markdown
|
||||
Requires-Dist: ansible
|
||||
Requires-Dist: six
|
||||
Requires-Dist: pyyaml
|
||||
Requires-Dist: appdirs
|
||||
Requires-Dist: unidiff
|
||||
Requires-Dist: flake8
|
||||
Requires-Dist: yamllint
|
||||
Requires-Dist: nested-lookup
|
||||
Requires-Dist: colorama
|
||||
Requires-Dist: anyconfig
|
||||
Requires-Dist: python-json-logger
|
||||
Requires-Dist: jsonschema
|
||||
Requires-Dist: pathspec
|
||||
Requires-Dist: toolz
|
||||
|
||||
# ansible-later
|
||||
|
||||
[![Build Status](https://cloud.drone.io/api/badges/xoxys/ansible-later/status.svg)](https://cloud.drone.io/xoxys/ansible-later)
|
||||
[![](https://img.shields.io/pypi/pyversions/ansible-later.svg)](https://pypi.org/project/ansible-later/)
|
||||
[![](https://img.shields.io/pypi/status/ansible-later.svg)](https://pypi.org/project/ansible-later/)
|
||||
[![](https://img.shields.io/pypi/v/ansible-later.svg)](https://pypi.org/project/ansible-later/)
|
||||
[![codecov](https://codecov.io/gh/xoxys/ansible-later/branch/master/graph/badge.svg)](https://codecov.io/gh/xoxys/ansible-later)
|
||||
|
||||
This is a fork of Will Thames [ansible-review](https://github.com/willthames/ansible-review) so credits goes to him
|
||||
for his work on ansible-review and ansible-lint.
|
||||
|
||||
`ansible-later` is a best practice scanner and linting tool. In most cases, if you write ansible roles in a team,
|
||||
it helps to have a coding or best practice guideline in place. This will make ansible roles more readable for all
|
||||
maintainers and can reduce the troubleshooting time.
|
||||
|
||||
`ansible-later` does _**not**_ ensure that your role will work as expected. For Deployment test you can use other tools
|
||||
like [molecule](https://github.com/ansible/molecule).
|
||||
|
||||
The project name is an acronym for **L**ovely **A**utomation **TE**sting f**R**amework.
|
||||
|
||||
## Table of Content
|
||||
|
||||
- [Setup](#setup)
|
||||
- [Using pip](#using-pip)
|
||||
- [From source](#from-source)
|
||||
- [Configuration](#configuration)
|
||||
- [Default settings](#default-settings)
|
||||
- [CLI Options](#cli-options)
|
||||
- [Usage](#usage)
|
||||
- [Buildin rules](#buildin-rules)
|
||||
- [Build your own rules](#build-your-own-rules)
|
||||
- [The standards file](#the-standards-file)
|
||||
- [Candidates](#candidates)
|
||||
- [Minimal standards checks](#minimal-standards-checks)
|
||||
- [License](#license)
|
||||
- [Maintainers and Contributors](#maintainers-and-contributors)
|
||||
|
||||
---
|
||||
|
||||
### Setup
|
||||
|
||||
#### Using pip
|
||||
|
||||
```Shell
|
||||
# From internal pip repo as user
|
||||
pip install ansible-later --user
|
||||
|
||||
# .. or as root
|
||||
sudo pip install ansible-later
|
||||
```
|
||||
|
||||
#### From source
|
||||
|
||||
```Shell
|
||||
# Install dependency
|
||||
git clone https://github.com/xoxys/ansible-later
|
||||
export PYTHONPATH=$PYTHONPATH:`pwd`/ansible-later/ansiblelater
|
||||
export PATH=$PATH:`pwd`/ansible-later/bin
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
ansible-later comes with some default settings which should be sufficient for most users to start,
|
||||
but you can adjust most settings to your needs.
|
||||
|
||||
Changes can be made in a yaml configuration file or through cli options
|
||||
which will be processed in the following order (last wins):
|
||||
|
||||
- default config (build-in)
|
||||
- global config file (this will depend on your operating system)
|
||||
- folderbased config file (`.later.yml` file in current working folder)
|
||||
- cli options
|
||||
|
||||
Be careful! YAML Attributes will be overwritten while lists in any
|
||||
config file will be merged.
|
||||
|
||||
To make it easier to review a single file e.g. for debugging purposes, ansible-later
|
||||
will ignore `exclude_files` and `ignore_dotfiles` options.
|
||||
|
||||
#### Default settings
|
||||
|
||||
```YAML
|
||||
---
|
||||
ansible:
|
||||
# Add the name of used custom ansible modules.
|
||||
# Otherwise ansible-later can't detect unknown modules
|
||||
# and will through an error.
|
||||
custom_modules: []
|
||||
# Settings for variable formatting rule (ANSIBLE0004)
|
||||
double-braces:
|
||||
max-spaces-inside: 1
|
||||
min-spaces-inside: 1
|
||||
|
||||
# Global logging configuration
|
||||
# If you would like to force colored output (e.g. non-tty)
|
||||
# set environment variable `PY_COLORS=1`
|
||||
logging:
|
||||
# You can enable json logging if a parsable output is required
|
||||
json: False
|
||||
# Possible options debug | info | warning | error | critical
|
||||
level: "warning"
|
||||
|
||||
# Global settings for all defined rules
|
||||
rules:
|
||||
# list of files to exclude
|
||||
exclude_files: []
|
||||
# Examples:
|
||||
# - molecule/
|
||||
# - files/**/*.py
|
||||
|
||||
# Limit checks to given rule ID's
|
||||
# If empty all rules will be used.
|
||||
filter: []
|
||||
|
||||
# Exclude given rule ID's from checks
|
||||
exclude_filter: []
|
||||
|
||||
# All dotfiles (including hidden folders) are excluded by default.
|
||||
# You can disable this setting and handle dotfiles by yourself with `exclude_files`.
|
||||
ignore_dotfiles: True
|
||||
# Path to the folder containing your custom standards file
|
||||
standards: ansiblelater/data
|
||||
|
||||
# Block to control included yamllint rules.
|
||||
# See https://yamllint.readthedocs.io/en/stable/rules.html
|
||||
yamllint:
|
||||
colons:
|
||||
max-spaces-after: 1
|
||||
max-spaces-before: 0
|
||||
document-start:
|
||||
present: True
|
||||
empty-lines:
|
||||
max: 1
|
||||
max-end: 1
|
||||
max-start: 0
|
||||
hyphens:
|
||||
max-spaces-after: 1
|
||||
indentation:
|
||||
check-multi-line-strings: False
|
||||
indent-sequences: True
|
||||
spaces: 2
|
||||
```
|
||||
|
||||
#### CLI Options
|
||||
|
||||
You can get all available cli options by running `ansible-later --help`:
|
||||
|
||||
```Shell
|
||||
$ ansible-later --help
|
||||
usage: ansible-later [-h] [-c CONFIG_FILE] [-r RULES.STANDARDS]
|
||||
[-s RULES.FILTER] [-v] [-q] [--version]
|
||||
[rules.files [rules.files ...]]
|
||||
|
||||
Validate ansible files against best pratice guideline
|
||||
|
||||
positional arguments:
|
||||
rules.files
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG_FILE, --config CONFIG_FILE
|
||||
location of configuration file
|
||||
-r RULES.STANDARDS, --rules RULES.STANDARDS
|
||||
location of standards rules
|
||||
-s RULES.FILTER, --standards RULES.FILTER
|
||||
limit standards to given ID's
|
||||
-x RULES.EXCLUDE_FILTER, --exclude-standards RULES.EXCLUDE_FILTER
|
||||
exclude standards by given ID's
|
||||
-v increase log level
|
||||
-q decrease log level
|
||||
--version show program's version number and exit
|
||||
```
|
||||
|
||||
### Usage
|
||||
|
||||
```Shell
|
||||
ansible-later FILES
|
||||
```
|
||||
|
||||
If you don't pass any file to ansible-later it will review all files including subdirs in
|
||||
the current working directory (hidden files and folders are excluded by default).
|
||||
|
||||
Otherwise you can pass a space delimited list of files to review. You can also pass glob
|
||||
patterns to ansible-later:
|
||||
|
||||
```Shell
|
||||
# Review single files
|
||||
ansible-later meta/main.yml tasks/install.yml
|
||||
|
||||
# Review all yml files (including subfolders)
|
||||
ansible-later **/*.yml
|
||||
```
|
||||
|
||||
ansible-later will review inventory files, role files, python code (modules, plugins)
|
||||
and playbooks.
|
||||
|
||||
- The goal is that each file that changes in a
|
||||
changeset should be reviewable simply by passing
|
||||
those files as the arguments to ansible-later.
|
||||
- Using `{{ playbook_dir }}` in sub roles is so far
|
||||
very hard.
|
||||
- This should work against various repository styles
|
||||
- per-role repository
|
||||
- roles with sub-roles
|
||||
- per-playbook repository
|
||||
- It should work with roles requirement files and with local roles
|
||||
|
||||
### Buildin rules
|
||||
|
||||
Reviews are nothing without some rules or standards against which to review. ansible-later
|
||||
comes with a couple of built-in checks explained in the following table.
|
||||
|
||||
| Rule | ID | Description | Parameter |
|
||||
|---------------------------------|-------------|-------------------------------------------------------------------|----------------------------------------------------------------------|
|
||||
| check_yaml_empty_lines | LINT0001 | YAML should not contain unnecessarily empty lines. | {max: 1, max-start: 0, max-end: 1} |
|
||||
| check_yaml_indent | LINT0002 | YAML should be correctly indented. | {spaces: 2, check-multi-line-strings: false, indent-sequences: true} |
|
||||
| check_yaml_hyphens | LINT0003 | YAML should use consistent number of spaces after hyphens (-). | {max-spaces-after: 1} |
|
||||
| check_yaml_document_start | LINT0004 | YAML should contain document start marker. | {document-start: {present: true}} |
|
||||
| check_yaml_colons | LINT0005 | YAML should use consistent number of spaces around colons. | {colons: {max-spaces-before: 0, max-spaces-after: 1}} |
|
||||
| check_yaml_file | LINT0006 | Roles file should be in yaml format. | |
|
||||
| check_yaml_has_content | LINT0007 | Files should contain useful content. | |
|
||||
| check_native_yaml | LINT0008 | Use YAML format for tasks and handlers rather than key=value. | |
|
||||
| check_line_between_tasks | ANSIBLE0001 | Single tasks should be separated by an empty line. | |
|
||||
| check_meta_main | ANSIBLE0002 | Meta file should contain a basic subset of parameters. | author, description, min_ansible_version, platforms, dependencies |
|
||||
| check_unique_named_task | ANSIBLE0003 | Tasks and handlers must be uniquely named within a file. | |
|
||||
| check_braces | ANSIBLE0004 | YAML should use consistent number of spaces around variables. | |
|
||||
| check_scm_in_src | ANSIBLE0005 | Use scm key rather than src: scm+url in requirements file. | |
|
||||
| check_named_task | ANSIBLE0006 | Tasks and handlers must be named. | excludes: meta, debug, include\_\*, import\_\*, block |
|
||||
| check_name_format | ANSIBLE0007 | Name of tasks and handlers must be formatted. | formats: first letter capital |
|
||||
| check_command_instead_of_module | ANSIBLE0008 | Commands should not be used in place of modules. | |
|
||||
| check_install_use_latest | ANSIBLE0009 | Package managers should not install with state=latest. | |
|
||||
| check_shell_instead_command | ANSIBLE0010 | Use Shell only when piping, redirecting or chaining commands. | |
|
||||
| check_command_has_changes | ANSIBLE0011 | Commands should be idempotent and only used with some checks. | |
|
||||
| check_empty_string_compare | ANSIBLE0012 | Don't compare to "" - use `when: var` or `when: not var`. | |
|
||||
| check_compare_to_literal_bool | ANSIBLE0013 | Don't compare to True/False - use `when: var` or `when: not var`. | |
|
||||
| check_literal_bool_format | ANSIBLE0014 | Literal bools should be written as `True/False` or `yes/no`. | forbidden values are `true false TRUE FALSE Yes No YES NO` |
|
||||
| check_become_user | ANSIBLE0015 | `become` should be always used combined with `become_user`. | |
|
||||
| check_filter_separation | ANSIBLE0016 | Jinja2 filters should be separated with spaces. | |
|
||||
|
||||
### Build your own rules
|
||||
|
||||
#### The standards file
|
||||
|
||||
A standards file comprises a list of standards, and optionally some methods to
|
||||
check those standards.
|
||||
|
||||
Create a file called standards.py (this can import other modules)
|
||||
|
||||
```Python
|
||||
from ansiblelater import Standard, Result
|
||||
|
||||
tasks_are_uniquely_named = Standard(dict(
|
||||
# ID's are optional but if you use ID's they have to be unique
|
||||
id="ANSIBLE0003",
|
||||
# Short description of the standard goal
|
||||
name="Tasks and handlers must be uniquely named within a single file",
|
||||
check=check_unique_named_task,
|
||||
version="0.1",
|
||||
types=["playbook", "task", "handler"],
|
||||
))
|
||||
|
||||
standards = [
|
||||
tasks_are_uniquely_named,
|
||||
role_must_contain_meta_main,
|
||||
]
|
||||
```
|
||||
|
||||
When you add new standards, you should increment the version of your standards.
|
||||
Your playbooks and roles should declare what version of standards you are
|
||||
using, otherwise ansible-later assumes you're using the latest. The declaration
|
||||
is done by adding standards version as first line in the file. e.g.
|
||||
|
||||
```INI
|
||||
# Standards: 1.2
|
||||
```
|
||||
|
||||
To add standards that are advisory, don't set the version. These will cause
|
||||
a message to be displayed but won't constitute a failure.
|
||||
|
||||
When a standard version is higher than declared version, a message will be
|
||||
displayed 'WARN: Future standard' and won't constitute a failure.
|
||||
|
||||
An example standards file is available at
|
||||
[ansiblelater/examples/standards.py](ansiblelater/examples/standards.py)
|
||||
|
||||
If you only want to check one or two standards quickly (perhaps you want
|
||||
to review your entire code base for deprecated bare words), you can use the
|
||||
`-s` flag with the name of your standard. You can pass `-s` multiple times.
|
||||
|
||||
```Shell
|
||||
git ls-files | xargs ansible-later -s "bare words are deprecated for with_items"
|
||||
```
|
||||
|
||||
You can see the name of the standards being checked for each different file by running
|
||||
`ansible-later` with the `-v` option.
|
||||
|
||||
#### Candidates
|
||||
|
||||
Each file passed to `ansible-later` will be classified. The result is a `Candidate` object
|
||||
which contains some meta information and is an instance of one of the following object types.
|
||||
|
||||
| Object type | Description |
|
||||
|-------------|------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Task | all files within the parent dir `tasks` |
|
||||
| Handler | all files within the parent dir `handler` |
|
||||
| RoleVars | all files within the parent dir `vars` or `default` |
|
||||
| GroupVars | all files (including subdirs) within the parent dir `group_vars` |
|
||||
| HostVars | all files (including subdirs) within the parent dir `host_vars` |
|
||||
| Meta | all files within the parent dir `meta` |
|
||||
| Code | all files within the parent dir `library`, `lookup_plugins`, `callback_plugins` and `filter_plugins` or python files (`.py`) |
|
||||
| Inventory | all files within the parent dir `inventories` and `inventory` or `hosts` as filename |
|
||||
| Rolesfile | all files with `rolesfile` or `requirements` in filename |
|
||||
| Makefile | all files with `Makefile` in filename |
|
||||
| Template | all files (including subdirs) within the parent dir `templates` or jinja2 files (`.j2`) |
|
||||
| File | all files (including subdirs) within the parent dir `files` |
|
||||
| Playbook | all yaml files (`.yml` or `.yaml`) not matching a previous object type |
|
||||
| Doc | all files with `README` in filename |
|
||||
|
||||
#### Minimal standards checks
|
||||
|
||||
A typical standards check will look like:
|
||||
|
||||
```Python
|
||||
def check_playbook_for_something(candidate, settings):
|
||||
result = Result(candidate.path) # empty result is a success with no output
|
||||
with open(candidate.path, 'r') as f:
|
||||
for (lineno, line) in enumerate(f):
|
||||
if line is dodgy:
|
||||
# enumerate is 0-based so add 1 to lineno
|
||||
result.errors.append(Error(lineno+1, "Line is dodgy: reasons"))
|
||||
return result
|
||||
```
|
||||
|
||||
All standards check take a candidate object, which has a path attribute.
|
||||
The type can be inferred from the class name (i.e. `type(candidate).__name__`)
|
||||
or from the table [here](#candidates).
|
||||
|
||||
They return a `Result` object, which contains a possibly empty list of `Error`
|
||||
objects. `Error` objects are formed of a line number and a message. If the
|
||||
error applies to the whole file being reviewed, set the line number to `None`.
|
||||
Line numbers are important as `ansible-later` can review just ranges of files
|
||||
to only review changes (e.g. through piping the output of `git diff` to
|
||||
`ansible-later`).
|
||||
|
||||
### License
|
||||
|
||||
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
||||
|
||||
### Maintainers and Contributors
|
||||
|
||||
[Robert Kaussow](https://github.com/xoxys)
|
||||
|
||||
|
@ -0,0 +1,44 @@
|
||||
../../../bin/ansible-later,sha256=M0Fl6Du6CPpNFKFw78DgVju368SwT3YHtwsbl-EoWH4,276
|
||||
ansible_later-0.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
ansible_later-0.2.4.dist-info/LICENSE,sha256=soSVCh6zxkA3f4_Y7LH7QT5FjYNWJ7Ul8j34Yd6u9uY,1071
|
||||
ansible_later-0.2.4.dist-info/METADATA,sha256=1yVMmwMrALh8-7EDj1CckL5w07lVRr_HiHsnUDDStXs,18732
|
||||
ansible_later-0.2.4.dist-info/RECORD,,
|
||||
ansible_later-0.2.4.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
|
||||
ansible_later-0.2.4.dist-info/entry_points.txt,sha256=eP7ly93pi_QU0DvD2ZNeKTQN_dk-FCKtqEw0qHcC6dg,62
|
||||
ansible_later-0.2.4.dist-info/top_level.txt,sha256=UqilO4utzab1ajjDBSXs1rDsNcrJ2Qiy8C2kNewPm_c,13
|
||||
ansiblelater/__init__.py,sha256=PjyHxI1Jg8CWh8YafI0xVdCIfQsM_f5ywF9_YG_9E_M,292
|
||||
ansiblelater/__init__.pyc,,
|
||||
ansiblelater/__main__.py,sha256=Y_cZede8OYlhyNFVjktg6YN3WXGKiDKftcRYDWIkB48,2853
|
||||
ansiblelater/__main__.pyc,,
|
||||
ansiblelater/command/__init__.py,sha256=mJkaZzvYgBnxsBAGv8y_P1yzonHqWgw6VF2Zs4rmJEA,7
|
||||
ansiblelater/command/__init__.pyc,,
|
||||
ansiblelater/command/base.py,sha256=zc_8d7G_1P6L-RbwvPtM-6MWpMggsujj63JNEAxaidQ,1970
|
||||
ansiblelater/command/base.pyc,,
|
||||
ansiblelater/command/candidates.py,sha256=KmjUZfwiANCvuov4BprX5xmhaqbzx-JQ-oO2Xq0YGj4,10516
|
||||
ansiblelater/command/candidates.pyc,,
|
||||
ansiblelater/exceptions.py,sha256=CsUAxZB80Ho-eEydxgpb6Z7efd8CkcnqOPOdeSO_Vak,692
|
||||
ansiblelater/exceptions.pyc,,
|
||||
ansiblelater/logger.py,sha256=x-2O9OWi5YOc-dWuHiIHuxIAAfDv7iaclyLo_53dMgs,5160
|
||||
ansiblelater/logger.pyc,,
|
||||
ansiblelater/rules/__init__.py,sha256=mJkaZzvYgBnxsBAGv8y_P1yzonHqWgw6VF2Zs4rmJEA,7
|
||||
ansiblelater/rules/__init__.pyc,,
|
||||
ansiblelater/rules/ansiblefiles.py,sha256=K6l56XZUCIyRP_paigEbJYT-236N9BkBdC30uAhM09Y,10291
|
||||
ansiblelater/rules/ansiblefiles.pyc,,
|
||||
ansiblelater/rules/rolefiles.py,sha256=YT8U3Yfpz1tW6PeP-h2-7jb9giintPwSmLPzhV46aaE,1197
|
||||
ansiblelater/rules/rolefiles.pyc,,
|
||||
ansiblelater/rules/taskfiles.py,sha256=tbjOH7Iy5rvbzFIzV-LegQYWEAxHin9vNW1n-Pauun8,1061
|
||||
ansiblelater/rules/taskfiles.pyc,,
|
||||
ansiblelater/rules/yamlfiles.py,sha256=5SuR4U85EFM1Khnac0JVXDvI593bkp9Q6KRrbFa38U0,3571
|
||||
ansiblelater/rules/yamlfiles.pyc,,
|
||||
ansiblelater/settings.py,sha256=npvpI_zWls8E7GdW6HKJ-CW4vWGH0sz1wpIlToP_zHA,6079
|
||||
ansiblelater/settings.pyc,,
|
||||
ansiblelater/standard.py,sha256=Km6bZ1DcdauLkGTD1v2mZopKEzcc7Om-v3RiQf9H3oE,825
|
||||
ansiblelater/standard.pyc,,
|
||||
ansiblelater/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
ansiblelater/tests/__init__.pyc,,
|
||||
ansiblelater/utils/__init__.py,sha256=DuxPfR6xkPx3CTvu3YWL-cSkiZhNu7ENUg-Lal7Xi5Y,2565
|
||||
ansiblelater/utils/__init__.pyc,,
|
||||
ansiblelater/utils/rulehelper.py,sha256=1pURLvz8bUiOwvQtcamphn8HyUt03nj30dvOwNjvF9Q,4519
|
||||
ansiblelater/utils/rulehelper.pyc,,
|
||||
ansiblelater/utils/yamlhelper.py,sha256=COmnT1ksmMi7eFCZR5DkzLphcKHszcD1Tqn_YJAZMyk,18811
|
||||
ansiblelater/utils/yamlhelper.pyc,,
|
@ -0,0 +1,6 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.33.1)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py2-none-any
|
||||
Tag: py3-none-any
|
||||
|
@ -0,0 +1,3 @@
|
||||
[console_scripts]
|
||||
ansible-later = ansiblelater.__main__:main
|
||||
|
@ -0,0 +1 @@
|
||||
ansiblelater
|
13
testenv/lib/python2.7/site-packages/ansiblelater/__init__.py
Normal file
13
testenv/lib/python2.7/site-packages/ansiblelater/__init__.py
Normal file
@ -0,0 +1,13 @@
|
||||
"""Default package."""
|
||||
|
||||
__author__ = "Robert Kaussow"
|
||||
__project__ = "ansible-later"
|
||||
__version__ = "0.2.4"
|
||||
__license__ = "MIT"
|
||||
__maintainer__ = "Robert Kaussow"
|
||||
__email__ = "mail@geeklabor.de"
|
||||
__status__ = "Production"
|
||||
|
||||
from ansiblelater import logger
|
||||
|
||||
LOG = logger.get_logger("ansiblelater")
|
83
testenv/lib/python2.7/site-packages/ansiblelater/__main__.py
Normal file
83
testenv/lib/python2.7/site-packages/ansiblelater/__main__.py
Normal file
@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python
|
||||
"""Main program."""
|
||||
|
||||
import argparse
|
||||
import multiprocessing
|
||||
import sys
|
||||
|
||||
from ansiblelater import LOG
|
||||
from ansiblelater import __version__
|
||||
from ansiblelater import logger
|
||||
from ansiblelater.command import base
|
||||
from ansiblelater.command import candidates
|
||||
|
||||
|
||||
def main():
    """Run main program.

    Parses the command line, classifies every requested file and reviews
    the reviewable ones in a worker pool. Exits with status 1 when any
    standard was violated, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Validate ansible files against best pratice guideline")
    parser.add_argument(
        "-c", "--config", dest="config_file",
        help="location of configuration file")
    parser.add_argument(
        "-r", "--rules", dest="rules.standards",
        help="location of standards rules")
    parser.add_argument(
        "-s", "--standards", dest="rules.filter", action="append",
        help="limit standards to given ID's")
    parser.add_argument(
        "-x", "--exclude-standards", dest="rules.exclude_filter", action="append",
        help="exclude standards by given ID's")
    parser.add_argument(
        "-v", dest="logging.level", action="append_const", const=-1,
        help="increase log level")
    parser.add_argument(
        "-q", dest="logging.level", action="append_const", const=1,
        help="decrease log level")
    parser.add_argument("rules.files", nargs="*")
    parser.add_argument(
        "--version", action="version", version="%(prog)s {}".format(__version__))

    cli_args = parser.parse_args().__dict__

    settings = base.get_settings(cli_args)
    config = settings.config

    logger.update_logger(LOG, config["logging"]["level"], config["logging"]["json"])

    candidate_files = config["rules"]["files"]
    standards = base.get_standards(config["rules"]["standards"])

    # Keep at least two workers, even on small machines.
    pool = multiprocessing.Pool(max(multiprocessing.cpu_count() - 2, 2))

    review_queue = []
    for filename in candidate_files:
        # Line-range reviewing is not wired up here yet; always review
        # the whole file.
        lines = None
        candidate = candidates.classify(filename, settings, standards)
        if not candidate:
            LOG.info("Couldn't classify file %s" % filename)
            continue
        if candidate.binary:
            LOG.info("Not reviewing binary file %s" % filename)
            continue
        if candidate.vault:
            LOG.info("Not reviewing vault file %s" % filename)
            continue
        if lines:
            LOG.info("Reviewing %s lines %s" % (candidate, lines))
        else:
            LOG.info("Reviewing all of %s" % candidate)
        review_queue.append((candidate, settings, lines))

    errors = sum(pool.map(_review_wrapper, review_queue))
    pool.close()
    pool.join()

    sys.exit(1 if not errors == 0 else 0)
|
||||
|
||||
|
||||
def _review_wrapper(args):
|
||||
(candidate, settings, lines) = args
|
||||
return candidate.review(settings, lines)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1 @@
|
||||
# noqa
|
@ -0,0 +1,59 @@
|
||||
"""Base methods."""
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import sys
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
import ansible
|
||||
import toolz
|
||||
|
||||
from ansiblelater import settings
|
||||
from ansiblelater import utils
|
||||
|
||||
|
||||
def get_settings(args):
    """
    Build a fresh settings object from parsed cli arguments.

    :param args: cli args from argparse
    :returns: Settings object

    """
    return settings.Settings(args=args)
|
||||
|
||||
|
||||
def get_standards(filepath):
    """
    Import the user's ``standards`` module from *filepath* and validate it.

    :param filepath: directory expected to contain a ``standards.py`` file
    :returns: the module's ``standards`` list
    """
    # Make the standards directory importable; NOTE(review): this extends
    # sys.path for the lifetime of the process.
    sys.path.append(os.path.abspath(os.path.expanduser(filepath)))
    try:
        standards = importlib.import_module("standards")
    except ImportError as e:
        # sysexit_with_message terminates the process, so `standards` is
        # always bound past this point.
        utils.sysexit_with_message(
            "Could not import standards from directory %s: %s" % (filepath, str(e)))

    # Optional gate: the standards file may pin a minimum ansible version.
    if getattr(standards, "ansible_min_version", None) and \
            LooseVersion(standards.ansible_min_version) > LooseVersion(ansible.__version__):
        utils.sysexit_with_message("Standards require ansible version %s (current version %s). "
                                   "Please upgrade ansible." %
                                   (standards.ansible_min_version, ansible.__version__))

    # Optional gate: minimum ansible-later version declared by the file.
    if getattr(standards, "ansible_later_min_version", None) and \
            LooseVersion(standards.ansible_later_min_version) > LooseVersion(
                utils.get_property("__version__")):
        utils.sysexit_with_message(
            "Standards require ansible-later version %s (current version %s). "
            "Please upgrade ansible-later." %
            (standards.ansible_later_min_version, utils.get_property("__version__")))

    # Standards without an explicit ID are allowed to repeat; only
    # non-empty IDs must be unique.
    normalized_std = (list(toolz.remove(lambda x: x.id == "", standards.standards)))
    unique_std = len(list(toolz.unique(normalized_std, key=lambda x: x.id)))
    all_std = len(normalized_std)
    if not all_std == unique_std:
        utils.sysexit_with_message(
            "Detect duplicate ID's in standards definition. Please use unique ID's only.")

    return standards.standards
|
@ -0,0 +1,326 @@
|
||||
"""Review candidates."""
|
||||
|
||||
import codecs
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from six import iteritems
|
||||
|
||||
from ansiblelater import LOG
|
||||
from ansiblelater import utils
|
||||
from ansiblelater.logger import flag_extra
|
||||
|
||||
try:
|
||||
# Ansible 2.4 import of module loader
|
||||
from ansible.plugins.loader import module_loader
|
||||
except ImportError:
|
||||
try:
|
||||
from ansible.plugins import module_loader
|
||||
except ImportError:
|
||||
from ansible.utils import module_finder as module_loader
|
||||
|
||||
|
||||
class Candidate(object):
|
||||
"""
|
||||
Meta object for all files which later has to process.
|
||||
|
||||
Each file passed to later will be classified by type and
|
||||
bundled with necessary meta informations for rule processing.
|
||||
"""
|
||||
|
||||
def __init__(self, filename, settings=None, standards=None):
    """
    Bundle *filename* with the metadata later needs for rule processing.

    :param filename: path of the file to review
    :param settings: Settings object providing ``config`` (defaults to
        an empty dict, matching the old ``settings={}`` default)
    :param standards: list of standard objects (defaults to empty list)
    """
    # Fix: the previous signature used mutable default arguments
    # (``settings={}, standards=[]``), which are shared across calls in
    # Python; replaced with None sentinels, normalized here.
    if settings is None:
        settings = {}
    if standards is None:
        standards = []

    self.path = filename
    self.binary = False
    self.vault = False
    self.filetype = type(self).__name__.lower()
    self.expected_version = True
    self.standards = self._get_standards(settings, standards)

    try:
        with codecs.open(filename, mode="rb", encoding="utf-8") as f:
            # Ansible vault files start with a "$ANSIBLE_VAULT" header.
            if f.readline().startswith("$ANSIBLE_VAULT"):
                self.vault = True
    except UnicodeDecodeError:
        # Undecodable content marks the candidate as binary.
        self.binary = True

    self.version = self._get_version(settings)
|
||||
|
||||
def _get_version(self, settings):
    """
    Determine the standards version that applies to this candidate.

    For role files the version is looked up in the role's meta/main.yml
    (found by walking up the directory tree); otherwise the file itself
    is scanned for a declaration line of the form ``# Standards: 1.2``.
    Falls back to the latest known standards version with a warning.

    :param settings: Settings object (currently not read here)
    :returns: standards version string
    """
    path = self.path
    version = None

    if not self.binary:
        if isinstance(self, RoleFile):
            # Walk up towards the filesystem root looking for the
            # enclosing role's meta/main.yml.
            parentdir = os.path.dirname(os.path.abspath(self.path))
            while parentdir != os.path.dirname(parentdir):
                meta_file = os.path.join(parentdir, "meta", "main.yml")
                if os.path.exists(meta_file):
                    path = meta_file
                    break
                parentdir = os.path.dirname(parentdir)

        # Declaration format: "# Standards: <dotted version>"
        version_re = re.compile(r"^# Standards:\s*([\d.]+)")

        # NOTE(review): the last matching line wins, since the loop does
        # not break on a match.
        with codecs.open(path, mode="rb", encoding="utf-8") as f:
            for line in f:
                match = version_re.match(line)
                if match:
                    version = match.group(1)

    if not version:
        # No declared version: assume the latest standards version.
        version = utils.standards_latest(self.standards)
        if self.expected_version:
            if isinstance(self, RoleFile):
                LOG.warning(
                    "%s %s is in a role that contains a meta/main.yml without a declared "
                    "standards version. "
                    "Using latest standards version %s" %
                    (type(self).__name__, self.path, version))
            else:
                LOG.warning(
                    "%s %s does not present standards version. "
                    "Using latest standards version %s" %
                    (type(self).__name__, self.path, version))
    else:
        LOG.info("%s %s declares standards version %s" %
                 (type(self).__name__, self.path, version))

    return version
|
||||
|
||||
def _get_standards(self, settings, standards):
    """Filter *standards* by the configured include/exclude id lists."""
    included = settings.config["rules"]["filter"]
    excluded = settings.config["rules"]["exclude_filter"]

    # An empty include filter means "all standards".
    if not included:
        included = [std.id for std in standards]

    return [std for std in standards
            if std.id in included and std.id not in excluded]
||||
def review(self, settings, lines=None):
    """Run all applicable standards against this candidate.

    :param settings: Global settings object (provides ``config``).
    :param lines: Optional line-range spec; when given, only errors
        inside these ranges are reported.
    :returns: Number of hard errors. Unversioned standards and standards
        newer than the candidate's declared version only produce
        warnings and are not counted.
    """
    errors = 0

    for standard in self.standards:
        # Skip standards that do not apply to this candidate type.
        if type(self).__name__.lower() not in standard.types:
            continue

        result = standard.check(self, settings.config)

        if not result:
            utils.sysexit_with_message("Standard '{}' returns an empty result object.".format(
                standard.check.__name__))

        # Base labels attached (as extra fields) to every log record below.
        labels = {"tag": "review", "standard": standard.name, "file": self.path, "passed": True}

        if standard.id and standard.id.strip():
            labels["id"] = standard.id

        for err in [err for err in result.errors
                    if not err.lineno or utils.is_line_in_ranges(err.lineno, utils.lines_ranges(lines))]:  # noqa
            err_labels = copy.copy(labels)
            err_labels["passed"] = False
            if isinstance(err, Error):
                err_labels.update(err.to_dict())

            if not standard.version:
                # Standard without a version: best-practice warning only.
                LOG.warning("{id}Best practice '{name}' not met:\n{path}:{error}".format(
                    id=self._format_id(standard.id),
                    name=standard.name,
                    path=self.path,
                    error=err), extra=flag_extra(err_labels))
            elif LooseVersion(standard.version) > LooseVersion(self.version):
                # Standard newer than the declared version: warn, don't count.
                LOG.warning("{id}Future standard '{name}' not met:\n{path}:{error}".format(
                    id=self._format_id(standard.id),
                    name=standard.name,
                    path=self.path,
                    error=err), extra=flag_extra(err_labels))
            else:
                LOG.error("{id}Standard '{name}' not met:\n{path}:{error}".format(
                    id=self._format_id(standard.id),
                    name=standard.name,
                    path=self.path,
                    error=err), extra=flag_extra(err_labels))
                errors = errors + 1

    return errors
||||
def _format_id(self, standard_id):
    """Return the id wrapped as "[id] ", or the input unchanged if blank."""
    if standard_id and standard_id.strip():
        return "[{id}] ".format(id=standard_id.strip())
    return standard_id
|
||||
def __repr__(self):  # noqa
    """Return a short "ClassName (path)" representation."""
    return "{} ({})".format(type(self).__name__, self.path)
|
||||
def __getitem__(self, item):  # noqa
    """Dict-style access to instance attributes; None for unknown keys."""
    return vars(self).get(item)
|
||||
|
||||
class RoleFile(Candidate):
    """Candidate located inside an Ansible role.

    Besides the base Candidate setup, registers the role's private
    ``library`` directory (if one exists in a parent directory) with
    Ansible's module loader so role-local modules can be resolved.
    """

    def __init__(self, filename, settings=None, standards=None):
        # None defaults instead of {} / [] to avoid sharing one mutable
        # default object across all instantiations.
        settings = settings if settings is not None else {}
        standards = standards if standards is not None else []
        super(RoleFile, self).__init__(filename, settings, standards)

        # Walk up the tree looking for a "library" folder; register the
        # first one found, then stop.
        parentdir = os.path.dirname(os.path.abspath(filename))
        while parentdir != os.path.dirname(parentdir):
            role_modules = os.path.join(parentdir, "library")
            if os.path.exists(role_modules):
                module_loader.add_directory(role_modules)
                break
            parentdir = os.path.dirname(parentdir)
||||
class Playbook(Candidate):
    """Candidate for a playbook YAML file; inherits all Candidate behavior."""

    pass
||||
class Task(RoleFile):
    """Candidate for a tasks file inside a role."""

    def __init__(self, filename, settings=None, standards=None):
        # None defaults avoid the shared-mutable-default pitfall.
        settings = settings if settings is not None else {}
        standards = standards if standards is not None else []
        super(Task, self).__init__(filename, settings, standards)
        self.filetype = "tasks"
||||
class Handler(RoleFile):
    """Candidate for a handlers file inside a role."""

    def __init__(self, filename, settings=None, standards=None):
        # None defaults avoid the shared-mutable-default pitfall.
        settings = settings if settings is not None else {}
        standards = standards if standards is not None else []
        super(Handler, self).__init__(filename, settings, standards)
        self.filetype = "handlers"
||||
class Vars(Candidate):
    """Candidate for a generic variable file."""

    pass
||||
class Unversioned(Candidate):
    """Candidate that is not expected to declare a standards version."""

    def __init__(self, filename, settings=None, standards=None):
        # None defaults avoid the shared-mutable-default pitfall.
        settings = settings if settings is not None else {}
        standards = standards if standards is not None else []
        super(Unversioned, self).__init__(filename, settings, standards)
        # Suppresses the "no standards version" warning in _get_version.
        self.expected_version = False
||||
class InventoryVars(Unversioned):
    """Candidate for inventory variable files (no standards version expected)."""

    pass


class HostVars(InventoryVars):
    """Candidate for host_vars variable files."""

    pass


class GroupVars(InventoryVars):
    """Candidate for group_vars variable files."""

    pass


class RoleVars(RoleFile):
    """Candidate for vars/defaults files inside a role."""

    pass


class Meta(RoleFile):
    """Candidate for a role's meta/main.yml."""

    pass


class Inventory(Unversioned):
    """Candidate for inventory/hosts files."""

    pass


class Code(Unversioned):
    """Candidate for Python code (modules, plugins)."""

    pass


class Template(RoleFile):
    """Candidate for Jinja2 templates inside a role."""

    pass


class Doc(Unversioned):
    """Candidate for documentation files such as README."""

    pass


class Makefile(Unversioned):
    """Candidate for Makefiles."""

    pass


class File(RoleFile):
    """Candidate for static files inside a role's files/ directory."""

    pass


class Rolesfile(Unversioned):
    """Candidate for rolesfile/requirements files."""

    pass
||||
class Error(object):
    """Default error object created if a rule failed."""

    def __init__(self, lineno, message, error_type=None, **kwargs):
        """
        Initialize a new error object and returns None.

        :param lineno: Line number where the error from the rule occurs
        :param message: Detailed error description provided by the rule

        """
        self.lineno = lineno
        self.message = message
        self.kwargs = kwargs
        # Expose every extra keyword argument as an attribute as well.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __repr__(self):  # noqa
        if self.lineno:
            return "{}: {}".format(self.lineno, self.message)
        return " {}".format(self.message)

    def to_dict(self):
        """Return the error as a plain dict, including extra kwargs."""
        result = dict(lineno=self.lineno, message=self.message)
        result.update(self.kwargs)
        return result
||||
class Result(object):
    """Container for the errors a single check produced for a candidate."""

    def __init__(self, candidate, errors=None):
        # candidate is typically the candidate's path; used in message().
        self.candidate = candidate
        self.errors = errors or []

    def message(self):
        """Join all errors into one newline-separated string."""
        formatted = ["{0}:{1}".format(self.candidate, err) for err in self.errors]
        return "\n".join(formatted)
||||
def classify(filename, settings={}, standards=[]):
    """Map a file path to an instance of the matching Candidate subclass.

    Checks are ordered from most to least specific; returns None when the
    file cannot be classified.
    """
    parentdir = os.path.basename(os.path.dirname(filename))
    basename = os.path.basename(filename)
    path_parts = filename.split(os.sep)

    if parentdir == "tasks":
        return Task(filename, settings, standards)
    if parentdir == "handlers":
        return Handler(filename, settings, standards)
    if parentdir in ("vars", "defaults"):
        return RoleVars(filename, settings, standards)
    if "group_vars" in path_parts:
        return GroupVars(filename, settings, standards)
    if "host_vars" in path_parts:
        return HostVars(filename, settings, standards)
    if parentdir == "meta":
        return Meta(filename, settings, standards)
    if (parentdir in ("library", "lookup_plugins", "callback_plugins", "filter_plugins")
            or filename.endswith(".py")):
        return Code(filename, settings, standards)
    if basename in ("inventory", "hosts") or parentdir == "inventories":
        return Inventory(filename, settings, standards)
    if "rolesfile" in basename or "requirements" in basename:
        return Rolesfile(filename, settings, standards)
    if "Makefile" in basename:
        return Makefile(filename, settings, standards)
    if "templates" in path_parts or basename.endswith(".j2"):
        return Template(filename, settings, standards)
    if "files" in path_parts:
        return File(filename, settings, standards)
    if basename.endswith((".yml", ".yaml")):
        return Playbook(filename, settings, standards)
    if "README" in basename:
        return Doc(filename, settings, standards)
    return None
|
@ -0,0 +1,26 @@
|
||||
"""Custom exceptions."""
|
||||
|
||||
import re
|
||||
|
||||
|
||||
class LaterError(Exception):
    """Generic exception for later."""

    def __init__(self, msg, original):
        """Initialize new exception, keeping a reference to the cause."""
        combined = "{}: {}".format(msg, original)
        super(LaterError, self).__init__(combined)
        self.original = original
||||
class LaterAnsibleError(Exception):
    """Wrapper for ansible syntax errors."""

    def __init__(self, msg, original):
        """Extract message, line and column from the wrapped ansible error."""
        parsed = original.message.splitlines()

        # The third line of an ansible syntax error carries
        # "... line X, column Y, ..." — assumed format; no fallback here.
        line_match = re.search("line(.*?),", parsed[2])
        column_match = re.search("column(.*?),", parsed[2])

        self.message = parsed[0]
        self.line = line_match.group(1).strip()
        self.column = column_match.group(1).strip()
183
testenv/lib/python2.7/site-packages/ansiblelater/logger.py
Normal file
183
testenv/lib/python2.7/site-packages/ansiblelater/logger.py
Normal file
@ -0,0 +1,183 @@
|
||||
"""Global logging helpers."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
import colorama
|
||||
from ansible.module_utils.parsing.convert_bool import boolean as to_bool
|
||||
from pythonjsonlogger import jsonlogger
|
||||
from six import iteritems
|
||||
|
||||
CONSOLE_FORMAT = "%(levelname)s: %(message)s"
|
||||
JSON_FORMAT = "(asctime) (levelname) (message)"
|
||||
|
||||
|
||||
def _should_do_markup():
    """Decide whether colored console output should be enabled.

    The PY_COLORS environment variable, when set, wins; otherwise markup
    is used only when stdout is a TTY and TERM is not "dumb".
    """
    py_colors = os.environ.get("PY_COLORS", None)
    if py_colors is not None:
        return to_bool(py_colors, strict=False)

    return sys.stdout.isatty() and os.environ.get("TERM") != "dumb"
||||
# Initialize colorama once at import time; strip ANSI sequences entirely
# when the environment does not support markup (see _should_do_markup).
colorama.init(autoreset=True, strip=not _should_do_markup())
||||
def flag_extra(extra):
    """Ensure extra args are prefixed."""
    prefixed = {}

    # Non-dict input (including None) yields an empty dict.
    if isinstance(extra, dict):
        prefixed = {"later_" + key: value for key, value in extra.items()}

    return prefixed
||||
class LogFilter(object):
    """A custom log filter which excludes log messages above the logged level."""

    def __init__(self, level):
        """
        Initialize a new custom log filter.

        :param level: Log level limit
        :returns: None

        """
        self.__level = level

    def filter(self, logRecord):  # noqa
        # https://docs.python.org/3/library/logging.html#logrecord-attributes
        # Accept only records at or below the configured level so each
        # handler emits exactly one level.
        return self.__level >= logRecord.levelno
||||
class MultilineFormatter(logging.Formatter):
    """Logging Formatter to reset color after newline characters."""

    def format(self, record):  # noqa
        # Insert an ANSI reset plus a "... " continuation marker after each
        # newline so multi-line messages don't bleed color across lines.
        record.msg = record.msg.replace("\n", "\n{}... ".format(colorama.Style.RESET_ALL))
        return logging.Formatter.format(self, record)
||||
class MultilineJsonFormatter(jsonlogger.JsonFormatter):
    """Logging Formatter to remove newline characters."""

    def format(self, record):  # noqa
        # JSON log lines must stay single-line; collapse embedded newlines.
        record.msg = record.msg.replace("\n", " ")
        return jsonlogger.JsonFormatter.format(self, record)
||||
def get_logger(name=None, level=logging.DEBUG, json=False):
    """
    Build a logger with the given name and returns the logger.

    :param name: The name for the logger. This is usually the module name, `__name__`.
    :param level: Initialize the new logger with given log level.
    :param json: Boolean flag to enable json formatted log output.
    :return: logger object

    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # One dedicated handler per level so each level has its own stream,
    # color and filter (see the _get_*_handler factories).
    logger.addHandler(_get_error_handler(json=json))
    logger.addHandler(_get_warn_handler(json=json))
    logger.addHandler(_get_info_handler(json=json))
    logger.addHandler(_get_critical_handler(json=json))
    # Prevent duplicate output through ancestor loggers.
    logger.propagate = False

    return logger
||||
def update_logger(logger, level=None, json=None):
    """Update logger configuration to change logging settings.

    :param logger: Logger object to reconfigure in place.
    :param level: New log level; when None the current level is kept.
    :param json: Boolean flag to enable json formatted log output.
    :returns: None
    """
    # Drop all existing handlers (iterate over a copy while mutating).
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    # Guard against the None default: logging.Logger.setLevel(None)
    # raises a TypeError ("Level not an integer or a valid string").
    if level is not None:
        logger.setLevel(level)
    logger.addHandler(_get_error_handler(json=json))
    logger.addHandler(_get_warn_handler(json=json))
    logger.addHandler(_get_info_handler(json=json))
    logger.addHandler(_get_critical_handler(json=json))
||||
def _get_error_handler(json=False):
    """Build a stderr handler that emits only ERROR records."""
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(logging.ERROR)
    # LogFilter drops records ABOVE the level; combined with setLevel this
    # pins the handler to exactly one level.
    handler.addFilter(LogFilter(logging.ERROR))
    handler.setFormatter(MultilineFormatter(error(CONSOLE_FORMAT)))

    if json:
        # JSON output replaces the colored console formatter entirely.
        handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))

    return handler


def _get_warn_handler(json=False):
    """Build a stdout handler that emits only WARNING records."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.WARN)
    handler.addFilter(LogFilter(logging.WARN))
    handler.setFormatter(MultilineFormatter(warn(CONSOLE_FORMAT)))

    if json:
        handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))

    return handler


def _get_info_handler(json=False):
    """Build a stdout handler that emits only INFO records."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.addFilter(LogFilter(logging.INFO))
    handler.setFormatter(MultilineFormatter(info(CONSOLE_FORMAT)))

    if json:
        handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))

    return handler


def _get_critical_handler(json=False):
    """Build a stderr handler that emits only CRITICAL records."""
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(logging.CRITICAL)
    handler.addFilter(LogFilter(logging.CRITICAL))
    handler.setFormatter(MultilineFormatter(critical(CONSOLE_FORMAT)))

    if json:
        handler.setFormatter(MultilineJsonFormatter(JSON_FORMAT))

    return handler
||||
# Per-level format-string colorizers used by the handler factories.
# critical and error intentionally share the same red color.
def critical(message):
    """Format critical messages and return string."""
    return color_text(colorama.Fore.RED, "{}".format(message))


def error(message):
    """Format error messages and return string."""
    return color_text(colorama.Fore.RED, "{}".format(message))


def warn(message):
    """Format warn messages and return string."""
    return color_text(colorama.Fore.YELLOW, "{}".format(message))


def info(message):
    """Format info messages and return string."""
    return color_text(colorama.Fore.BLUE, "{}".format(message))
||||
def color_text(color, msg):
    """
    Colorize strings.

    :param color: colorama color settings
    :param msg: string to colorize
    :returns: string

    """
    # Append the reset sequence so the color does not leak past msg.
    return "".join([color, msg, colorama.Style.RESET_ALL])
|
@ -0,0 +1 @@
|
||||
# noqa
|
@ -0,0 +1,266 @@
|
||||
"""Checks related to ansible specific best practices."""
|
||||
|
||||
import os
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from ansiblelater.command.candidates import Error
|
||||
from ansiblelater.command.candidates import Result
|
||||
from ansiblelater.utils import count_spaces
|
||||
from ansiblelater.utils.rulehelper import get_normalized_tasks
|
||||
from ansiblelater.utils.rulehelper import get_normalized_yaml
|
||||
|
||||
|
||||
def check_braces_spaces(candidate, settings):
    """Verify that Jinja2 {{ ... }} expressions use the configured spacing."""
    yamllines, errors = get_normalized_yaml(candidate, settings)
    conf = settings["ansible"]["double-braces"]
    description = "no suitable numbers of spaces (min: {min} max: {max})".format(
        min=conf["min-spaces-inside"], max=conf["max-spaces-inside"])

    matches = []
    braces = re.compile("{{(.*?)}}")

    if not errors:
        # Collect every double-brace expression with its line number.
        for i, line in yamllines:
            match = braces.findall(line)
            if match:
                for item in match:
                    matches.append((i, item))

        for i, line in matches:
            [leading, trailing] = count_spaces(line)
            sum_spaces = leading + trailing

            # Lower bound against min-spaces-inside, upper bound against
            # max-spaces-inside. The upper bound previously compared
            # against min-spaces-inside as well, which rejected any
            # configuration where min != max.
            if (
                (sum_spaces < conf["min-spaces-inside"] * 2)
                or (sum_spaces > conf["max-spaces-inside"] * 2)
            ):
                errors.append(Error(i, description))
    return Result(candidate.path, errors)
|
||||
def check_named_task(candidate, settings):
    """Ensure every task has a name, except modules that don't need one."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    # Modules that are conventionally used without a name attribute.
    exempt = ["meta", "debug", "include_role", "import_role",
              "include_tasks", "import_tasks", "include_vars",
              "block"]
    description = "module '%s' used without name attribute"

    if not errors:
        for entry in tasks:
            module_name = entry["action"]["__ansible_module__"]
            if "name" not in entry and module_name not in exempt:
                errors.append(Error(entry["__line__"], description % module_name))

    return Result(candidate.path, errors)
||||
def check_name_format(candidate, settings):
    """Check that task names start with an uppercase letter."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    description = "name '%s' should start with uppercase"
    namelines = defaultdict(list)

    if not errors:
        # Group line numbers by task name; flag each offending name once,
        # at its last occurrence.
        for task in tasks:
            if "name" in task:
                namelines[task["name"]].append(task["__line__"])
        for (name, lines) in namelines.items():
            # Guard against an empty name: name[0] would raise IndexError.
            if name and not name[0].isupper():
                errors.append(Error(lines[-1], description % name))

    return Result(candidate.path, errors)
||||
def check_unique_named_task(candidate, settings):
    """Report task names that occur more than once in the file."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    description = "name '%s' appears multiple times"

    occurrences = defaultdict(list)

    if not errors:
        for entry in tasks:
            if "name" in entry:
                occurrences[entry["name"]].append(entry["__line__"])
        # Flag duplicated names once, at their last occurrence.
        for name, linenos in occurrences.items():
            if len(linenos) > 1:
                errors.append(Error(linenos[-1], description % name))

    return Result(candidate.path, errors)
||||
def check_command_instead_of_module(candidate, settings):
    """Suggest dedicated modules instead of command/shell/raw invocations.

    Tasks that set warn=False or register their result are exempt.
    """
    tasks, errors = get_normalized_tasks(candidate, settings)
    commands = ["command", "shell", "raw"]
    # Map of executables to the module(s) that should replace them.
    modules = {
        "git": "git", "hg": "hg", "curl": "get_url or uri", "wget": "get_url or uri",
        "svn": "subversion", "service": "service", "mount": "mount",
        "rpm": "yum or rpm_key", "yum": "yum", "apt-get": "apt-get",
        "unzip": "unarchive", "tar": "unarchive", "chkconfig": "service",
        "rsync": "synchronize", "supervisorctl": "supervisorctl", "systemctl": "systemd",
        "sed": "template or lineinfile"
    }
    description = "%s command used in place of %s module"

    if not errors:
        for task in tasks:
            if task["action"]["__ansible_module__"] in commands:
                # The executable is the first word of the command line.
                if "cmd" in task["action"]:
                    first_cmd_arg = task["action"]["cmd"].split()[0]
                else:
                    first_cmd_arg = task["action"]["__ansible_arguments__"][0]

                executable = os.path.basename(first_cmd_arg)
                if (first_cmd_arg and executable in modules
                        and task["action"].get("warn", True) and "register" not in task):
                    errors.append(
                        Error(task["__line__"], description % (executable, modules[executable])))

    return Result(candidate.path, errors)
||||
def check_install_use_latest(candidate, settings):
    """Discourage state=latest on package manager tasks."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    package_managers = ["yum", "apt", "dnf", "homebrew", "pacman", "openbsd_package", "pkg5",
                        "portage", "pkgutil", "slackpkg", "swdepot", "zypper", "bundler", "pip",
                        "pear", "npm", "yarn", "gem", "easy_install", "bower", "package", "apk",
                        "openbsd_pkg", "pkgng", "sorcery", "xbps"]
    description = "package installs should use state=present with or without a version"

    if not errors:
        flagged = (entry for entry in tasks
                   if entry["action"]["__ansible_module__"] in package_managers
                   and entry["action"].get("state") == "latest")
        for entry in flagged:
            errors.append(Error(entry["__line__"], description))

    return Result(candidate.path, errors)
||||
def check_shell_instead_command(candidate, settings):
    """Flag shell tasks that don't use any shell-specific features."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    description = "shell should only be used when piping, redirecting or chaining commands"

    if not errors:
        for task in tasks:
            if task["action"]["__ansible_module__"] == "shell":
                if "cmd" in task["action"]:
                    # Fall back to an empty string, not a list: the value is
                    # fed to re.sub below, which requires a string.
                    cmd = task["action"].get("cmd", "")
                else:
                    cmd = " ".join(task["action"].get("__ansible_arguments__", []))

                # Mask Jinja expressions so their contents don't look like
                # shell metacharacters.
                unjinja = re.sub(r"\{\{[^\}]*\}\}", "JINJA_VAR", cmd)
                if not any([ch in unjinja for ch in "&|<>;$\n*[]{}?"]):
                    errors.append(Error(task["__line__"], description))

    return Result(candidate.path, errors)
||||
def check_command_has_changes(candidate, settings):
    """Require change detection or guards on command/shell/raw tasks."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    commands = ["command", "shell", "raw"]
    description = "commands should either read information (and thus set changed_when) or not " \
                  "do something if it has already been done (using creates/removes) " \
                  "or only do it if another check has a particular result (when)"

    if not errors:
        for entry in tasks:
            action = entry["action"]
            if action["__ansible_module__"] not in commands:
                continue
            guarded = ("changed_when" in entry or "when" in entry
                       or "when" in entry["__ansible_action_meta__"]
                       or "creates" in action or "removes" in action)
            if not guarded:
                errors.append(Error(entry["__line__"], description))

    return Result(candidate.path, errors)
||||
def check_empty_string_compare(candidate, settings):
    """Discourage comparisons against the empty string in conditions."""
    yamllines, errors = get_normalized_yaml(candidate, settings)
    # The inner quotes must be escaped: previously the unescaped ""
    # terminated and reopened the string literal (implicit adjacent-string
    # concatenation), silently dropping the quote characters from the
    # rendered message.
    description = "use `when: var` rather than `when: var != \"\"` (or " \
                  "conversely `when: not var` rather than `when: var == \"\"`)"

    empty_string_compare = re.compile("[=!]= ?[\"'][\"']")

    if not errors:
        for i, line in yamllines:
            if empty_string_compare.findall(line):
                errors.append(Error(i, description))

    return Result(candidate.path, errors)
||||
def check_compare_to_literal_bool(candidate, settings):
    """Discourage `var == True` style comparisons in conditions."""
    yamllines, errors = get_normalized_yaml(candidate, settings)
    description = "use `when: var` rather than `when: var == True` " \
                  "(or conversely `when: not var`)"

    literal_bool_compare = re.compile("[=!]= ?(True|true|False|false)")

    if not errors:
        for lineno, text in yamllines:
            if literal_bool_compare.findall(text):
                errors.append(Error(lineno, description))

    return Result(candidate.path, errors)
||||
def check_delegate_to_localhost(candidate, settings):
    """Suggest connection: local instead of delegate_to: localhost."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    description = "connection: local ensures that unexpected delegated_vars " \
                  "don't get set (e.g. {{ inventory_hostname }} " \
                  "used by vars_files)"

    if not errors:
        for entry in tasks:
            if entry.get("delegate_to") == "localhost":
                errors.append(Error(entry["__line__"], description))

    return Result(candidate.path, errors)
||||
def check_literal_bool_format(candidate, settings):
    """Enforce 'True/False' or 'yes/no' spelling for literal booleans."""
    yamllines, errors = get_normalized_yaml(candidate, settings)
    description = "literal bools should be written as 'True/False' or 'yes/no'"

    # Matches disallowed spellings after a comparison or a YAML colon.
    uppercase_bool = re.compile(r"([=!]=|:)\s*(true|false|TRUE|FALSE|Yes|No|YES|NO)\s*$")

    if not errors:
        for lineno, text in yamllines:
            if uppercase_bool.findall(text):
                errors.append(Error(lineno, description))

    return Result(candidate.path, errors)
||||
def check_become_user(candidate, settings):
    """Require become_user whenever become is enabled on a task."""
    tasks, errors = get_normalized_tasks(candidate, settings)
    description = "the task has 'become:' enabled but 'become_user:' is missing"
    truthy = [True, "true", "True", "TRUE", "yes", "Yes", "YES"]

    if not errors:
        for entry in tasks:
            if "become" not in entry:
                continue
            if entry["become"] in truthy and "become_user" not in entry:
                errors.append(Error(entry["__line__"], description))

    return Result(candidate.path, errors)
||||
def check_filter_separation(candidate, settings):
    """Check spacing around Jinja2 filter pipes inside {{ ... }}."""
    yamllines, errors = get_normalized_yaml(candidate, settings)
    description = "no suitable numbers of spaces (required: 1)"

    matches = []
    braces = re.compile("{{(.*?)}}")
    # Appears to match tokens adjacent to "|" with either no space or two
    # or more spaces on the relevant side — TODO confirm against rule docs.
    filters = re.compile(r"(?<=\|)([\s]{2,}[^\s}]+|[^\s]+)|([^\s{]+[\s]{2,}|[^\s]+)(?=\|)")

    if not errors:
        # Collect all double-brace expressions with their line numbers.
        for i, line in yamllines:
            match = braces.findall(line)
            if match:
                for item in match:
                    matches.append((i, item))

        for i, line in matches:
            if filters.findall(line):
                errors.append(Error(i, description))
    return Result(candidate.path, errors)
|
@ -0,0 +1,35 @@
|
||||
"""Checks related to ansible roles files."""
|
||||
|
||||
from ansible.parsing.yaml.objects import AnsibleMapping
|
||||
from nested_lookup import nested_lookup
|
||||
|
||||
from ansiblelater.command.candidates import Error
|
||||
from ansiblelater.command.candidates import Result
|
||||
from ansiblelater.utils.rulehelper import get_raw_yaml
|
||||
from ansiblelater.utils.rulehelper import get_tasks
|
||||
|
||||
|
||||
def check_meta_main(candidate, settings):
    """Verify meta/main.yml contains the expected metadata keys."""
    content, errors = get_raw_yaml(candidate, settings)
    required = ["author", "description", "min_ansible_version", "platforms", "dependencies"]
    description = "file should contain '%s' key"

    if not errors:
        missing = [key for key in required if not nested_lookup(key, content)]
        for key in missing:
            errors.append(Error(None, description % (key)))

    return Result(candidate.path, errors)
||||
def check_scm_in_src(candidate, settings):
    """Discourage scm+url style src entries in role requirement files."""
    roles, errors = get_tasks(candidate, settings)
    description = "usage of src: scm+url not recommended"

    if not errors:
        for role in roles:
            if isinstance(role, AnsibleMapping):
                # src may be absent; the previous unconditional
                # `"+" in role.get("src")` raised TypeError on None.
                src = role.get("src")
                if src and "+" in src:
                    errors.append(Error(role["__line__"], description))

    return Result(candidate.path, errors)
@ -0,0 +1,32 @@
|
||||
"""Checks related to ansible task files."""
|
||||
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from ansiblelater.command.candidates import Error
|
||||
from ansiblelater.command.candidates import Result
|
||||
from ansiblelater.utils.rulehelper import get_normalized_yaml
|
||||
|
||||
|
||||
def check_line_between_tasks(candidate, settings):
    """Require one empty line between consecutive tasks.

    Empty lines and document markers are kept during normalization so the
    separation between tasks remains visible to this check.
    """
    options = defaultdict(dict)
    options.update(remove_empty=False)
    options.update(remove_markers=False)

    lines, errors = get_normalized_yaml(candidate, settings, options)
    description = "missing task separation (required: 1 empty line)"

    task_regex = re.compile(r"-\sname:.*")
    # Sentinel so the very first task in the file is never flagged.
    prevline = "#file_start_marker"

    # Lines after which a task may start without a blank line in between.
    allowed_prevline = ["---", "tasks:", "pre_tasks:", "post_tasks:", "block:"]

    if not errors:
        for i, line in lines:
            match = task_regex.search(line)
            # An empty prevline (blank separator) also lets the task pass.
            if match and prevline:
                if not any(item in prevline for item in allowed_prevline):
                    errors.append(Error(i, description))
            prevline = line.strip()

    return Result(candidate.path, errors)
@ -0,0 +1,100 @@
|
||||
"""Checks related to generic YAML syntax (yamllint)."""
|
||||
|
||||
import codecs
|
||||
import os
|
||||
|
||||
import yaml
|
||||
|
||||
from ansiblelater.command.candidates import Error
|
||||
from ansiblelater.command.candidates import Result
|
||||
from ansiblelater.utils.rulehelper import get_action_tasks
|
||||
from ansiblelater.utils.rulehelper import get_normalized_task
|
||||
from ansiblelater.utils.rulehelper import get_normalized_yaml
|
||||
from ansiblelater.utils.rulehelper import run_yamllint
|
||||
|
||||
|
||||
def check_yaml_has_content(candidate, settings):
    """Flag YAML files that normalize to no content at all."""
    lines, errors = get_normalized_yaml(candidate, settings)
    description = "the file appears to have no useful content"

    # Only flag when the file is empty AND parsing produced no errors.
    if not (lines or errors):
        errors.append(Error(None, description))

    return Result(candidate.path, errors)
||||
def check_native_yaml(candidate, settings):
    """Flag tasks written in key=value shorthand instead of native YAML."""
    tasks, errors = get_action_tasks(candidate, settings)
    description = "task arguments appear to be in key value rather than YAML format"

    if not errors:
        for task in tasks:
            normal_form, error = get_normalized_task(task, candidate, settings)
            if error:
                # Abort on the first normalization failure; remaining
                # tasks are not checked.
                errors.extend(error)
                break

            action = normal_form["action"]["__ansible_module__"]
            arguments = normal_form["action"]["__ansible_arguments__"]
            # Cope with `set_fact` where task["set_fact"] is None
            if not task.get(action):
                continue
            # A dict value means the task is already in native YAML form.
            if isinstance(task[action], dict):
                continue
            # strip additional newlines off task[action]
            if task[action].strip().split() != arguments:
                errors.append(Error(task["__line__"], description))
    return Result(candidate.path, errors)
||||
# Thin wrappers around yamllint: each builds a one-rule yamllint config
# from the corresponding settings entry and runs it against the candidate.
def check_yaml_empty_lines(candidate, settings):
    """Run yamllint's empty-lines rule with the configured options."""
    options = "rules: {{empty-lines: {conf}}}".format(
        conf=settings["yamllint"]["empty-lines"])
    errors = run_yamllint(candidate.path, options)
    return Result(candidate.path, errors)


def check_yaml_indent(candidate, settings):
    """Run yamllint's indentation rule with the configured options."""
    options = "rules: {{indentation: {conf}}}".format(
        conf=settings["yamllint"]["indentation"])
    errors = run_yamllint(candidate.path, options)
    return Result(candidate.path, errors)


def check_yaml_hyphens(candidate, settings):
    """Run yamllint's hyphens rule with the configured options."""
    options = "rules: {{hyphens: {conf}}}".format(
        conf=settings["yamllint"]["hyphens"])
    errors = run_yamllint(candidate.path, options)
    return Result(candidate.path, errors)


def check_yaml_document_start(candidate, settings):
    """Run yamllint's document-start rule with the configured options."""
    options = "rules: {{document-start: {conf}}}".format(
        conf=settings["yamllint"]["document-start"])
    errors = run_yamllint(candidate.path, options)
    return Result(candidate.path, errors)


def check_yaml_colons(candidate, settings):
    """Run yamllint's colons rule with the configured options."""
    options = "rules: {{colons: {conf}}}".format(
        conf=settings["yamllint"]["colons"])
    errors = run_yamllint(candidate.path, options)
    return Result(candidate.path, errors)
||||
def check_yaml_file(candidate, settings):
    """Verify the candidate has a .yml extension and parses as YAML."""
    errors = []
    filename = candidate.path

    if os.path.isfile(filename) and os.path.splitext(filename)[1][1:] != "yml":
        errors.append(
            Error(None, "file does not have a .yml extension"))
    elif os.path.isfile(filename) and os.path.splitext(filename)[1][1:] == "yml":
        with codecs.open(filename, mode="rb", encoding="utf-8") as f:
            try:
                yaml.safe_load(f)
            except yaml.YAMLError as e:
                # Catch only YAML parse errors; the previous blanket
                # `except Exception` assumed every exception carries
                # problem_mark/problem and raised AttributeError otherwise.
                mark = getattr(e, "problem_mark", None)
                lineno = mark.line + 1 if mark else None
                errors.append(
                    Error(lineno, "syntax error: %s" % (getattr(e, "problem", e))))

    return Result(candidate.path, errors)
175
testenv/lib/python2.7/site-packages/ansiblelater/settings.py
Normal file
175
testenv/lib/python2.7/site-packages/ansiblelater/settings.py
Normal file
@ -0,0 +1,175 @@
|
||||
"""Global settings object definition."""
|
||||
|
||||
import os
|
||||
|
||||
import anyconfig
|
||||
import pathspec
|
||||
from appdirs import AppDirs
|
||||
from jsonschema._utils import format_as_index
|
||||
from pkg_resources import resource_filename
|
||||
|
||||
from ansiblelater import utils
|
||||
|
||||
config_dir = AppDirs("ansible-later").user_config_dir
|
||||
default_config_file = os.path.join(config_dir, "config.yml")
|
||||
|
||||
|
||||
class Settings(object):
    """
    Create an object with all necessary settings.

    Settings are loaded from multiple locations in defined order (last wins):
    - default settings defined by `self._get_defaults()`
    - yaml config file, defaults to OS specific user config dir (https://pypi.org/project/appdirs/)
    - provided cli parameters
    """

    def __init__(self, args=None, config_file=default_config_file):
        """
        Initialize a new settings class.

        :param args: An optional dict of options, arguments and commands from the CLI.
        :param config_file: An optional path to a yaml config file.
        :returns: None

        """
        # `args` defaults to None instead of a mutable `{}`: a dict default
        # would be shared between all Settings instances and `_set_args`
        # mutates it (pop below).
        if args is None:
            args = {}
        self.config_file = config_file
        self.schema = None
        self.args_files = False
        self.args = self._set_args(args)
        self.config = self._get_config()
        self._update_filelist()

    def _set_args(self, args):
        """Normalize dotted CLI arguments into a nested dict."""
        defaults = self._get_defaults()
        self.config_file = args.get("config_file") or default_config_file

        args.pop("config_file", None)
        # Drop options the user did not provide so they don't mask config/file
        # values during the merge in `_get_config`.
        tmp_args = dict(filter(lambda item: item[1] is not None, args.items()))

        tmp_dict = {}
        for key, value in tmp_args.items():
            tmp_dict = utils.add_dict_branch(tmp_dict, key.split("."), value)

        # Override correct log level from argparse: each -v/-q occurrence is an
        # adjustment (+1/-1) applied to the default level index.
        levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        log_level = levels.index(defaults["logging"]["level"])
        if tmp_dict.get("logging"):
            for adjustment in tmp_dict["logging"]["level"]:
                log_level = min(len(levels) - 1, max(log_level + adjustment, 0))
            tmp_dict["logging"]["level"] = levels[log_level]

        # NOTE(review): assumes argparse always supplies `rules.files`
        # (possibly empty) — confirm against the CLI definition.
        if len(tmp_dict["rules"]["files"]) == 0:
            tmp_dict["rules"]["files"] = ["*"]
        else:
            # Remember that files came from the CLI so `_update_filelist`
            # skips the dotfile exclusion.
            self.args_files = True

        return tmp_dict

    def _get_config(self):
        """Merge defaults, config files and CLI options (last wins)."""
        defaults = self._get_defaults()
        source_files = []
        source_files.append(self.config_file)
        source_files.append(os.path.relpath(
            os.path.normpath(os.path.join(os.getcwd(), ".later.yml"))))
        cli_options = self.args

        for config in source_files:
            if config and os.path.exists(config):
                with utils.open_file(config) as stream:
                    s = stream.read()
                    sdict = utils.safe_load(s)
                    if self._validate(sdict):
                        anyconfig.merge(defaults, sdict, ac_merge=anyconfig.MS_DICTS)
                        defaults["logging"]["level"] = defaults["logging"]["level"].upper()

        if cli_options and self._validate(cli_options):
            anyconfig.merge(defaults, cli_options, ac_merge=anyconfig.MS_DICTS)

        return defaults

    def _get_defaults(self):
        """Build the default settings dict and derive the validation schema."""
        # Bundled rules shipped inside the package data directory.
        rules_dir = os.path.join(resource_filename("ansiblelater", "data"))
        defaults = {
            "rules": {
                "standards": rules_dir,
                "filter": [],
                "exclude_filter": [],
                "ignore_dotfiles": True,
                "exclude_files": []
            },
            "logging": {
                "level": "WARNING",
                "json": False
            },
            "ansible": {
                "custom_modules": [],
                "double-braces": {
                    "min-spaces-inside": 1,
                    "max-spaces-inside": 1,
                },
            },
            "yamllint": {
                "empty-lines": {
                    "max": 1,
                    "max-start": 0,
                    "max-end": 1,
                },
                "indentation": {
                    "spaces": 2,
                    "check-multi-line-strings": False,
                    "indent-sequences": True,
                },
                "hyphens": {
                    "max-spaces-after": 1
                },
                "document-start": {
                    "present": True
                },
                "colons": {
                    "max-spaces-before": 0,
                    "max-spaces-after": 1
                },
            },
        }

        # Schema is generated from the defaults so any user-supplied config is
        # validated against the same structure.
        self.schema = anyconfig.gen_schema(defaults)

        return defaults

    def _validate(self, config):
        """Validate a config dict against the generated schema; exit on error."""
        try:
            anyconfig.validate(config, self.schema, ac_schema_safe=False)
            return True
        except Exception as e:
            schema_error = "Failed validating '{validator}' in schema{schema}".format(
                validator=e.validator,
                schema=format_as_index(list(e.relative_schema_path)[:-1])
            )
            # sysexit_with_message terminates the process; no return needed.
            utils.sysexit_with_message("{schema}: {msg}".format(schema=schema_error, msg=e.message))

    def _update_filelist(self):
        """Expand include/exclude patterns into the concrete candidate file list."""
        includes = self.config["rules"]["files"]
        excludes = self.config["rules"]["exclude_files"]
        ignore_dotfiles = self.config["rules"]["ignore_dotfiles"]

        # Dotfiles are only excluded when the user did not pass explicit files
        # on the command line.
        if ignore_dotfiles and not self.args_files:
            excludes.append(".*")
        else:
            del excludes[:]

        filelist = []
        for root, dirs, files in os.walk("."):
            for filename in files:
                filelist.append(
                    os.path.relpath(os.path.normpath(os.path.join(root, filename))))

        valid = []
        includespec = pathspec.PathSpec.from_lines("gitwildmatch", includes)
        excludespec = pathspec.PathSpec.from_lines("gitwildmatch", excludes)
        for item in filelist:
            if includespec.match_file(item) and not excludespec.match_file(item):
                valid.append(item)

        self.config["rules"]["files"] = valid
|
28
testenv/lib/python2.7/site-packages/ansiblelater/standard.py
Normal file
28
testenv/lib/python2.7/site-packages/ansiblelater/standard.py
Normal file
@ -0,0 +1,28 @@
|
||||
"""Standard definition."""
|
||||
|
||||
|
||||
class Standard(object):
    """
    Standard definition for all defined rules.

    Later looks up the config file for a path to a rules directory
    or falls back to the default `ansiblelater/data/*`.
    """

    def __init__(self, standard_dict):
        """
        Initialize a new standard object.

        :param standard_dict: dictionary containing the standard attributes
        :returns: None

        """
        # `id` falls back to an empty string; all other attributes to None.
        self.id = standard_dict.get("id", "")
        for attribute in ("name", "version", "check", "types"):
            setattr(self, attribute, standard_dict.get(attribute))

    def __repr__(self):  # noqa
        """Return a short human readable description of the standard."""
        return "Standard: %s (version: %s, types: %s)" % (
            self.name, self.version, self.types)
|
@ -0,0 +1,113 @@
|
||||
"""Global utils collection."""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
import yaml
|
||||
|
||||
from ansiblelater import logger
|
||||
|
||||
try:
|
||||
import ConfigParser as configparser # noqa
|
||||
except ImportError:
|
||||
import configparser # noqa
|
||||
|
||||
|
||||
LOG = logger.get_logger(__name__)
|
||||
|
||||
|
||||
def count_spaces(c_string):
    """Return a `(leading, trailing)` whitespace count tuple for a string."""
    total = len(c_string)
    leading = total - len(c_string.lstrip())
    trailing = total - len(c_string.rstrip())
    return (leading, trailing)
|
||||
|
||||
|
||||
def get_property(prop):
    """
    Read a module level metadata property from the package __init__.py.

    :param prop: name of the variable to look up (e.g. "__version__")
    :returns: the property value as string

    """
    currentdir = os.path.dirname(os.path.realpath(__file__))
    parentdir = os.path.dirname(currentdir)
    # Use a context manager so the file handle is closed deterministically;
    # the previous bare `open(...).read()` leaked the handle.
    with open(os.path.join(parentdir, "__init__.py")) as init_file:
        result = re.search(
            r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
            init_file.read())
    return result.group(1)
|
||||
|
||||
|
||||
def standards_latest(standards):
    """Return the highest `version` among the given standards ("0.1" if none set)."""
    # Standards without a version are skipped; the `or ["0.1"]` fallback keeps
    # max() from failing on an empty sequence.
    return max([standard.version for standard in standards if standard.version] or ["0.1"],
               key=LooseVersion)
|
||||
|
||||
|
||||
def lines_ranges(lines_spec):
    """Convert a "start-end[,start-end...]" spec into a list of range objects."""
    if not lines_spec:
        return None
    return [range(int(start), int(end) + 1)
            for start, end in (interval.split("-")
                               for interval in lines_spec.split(","))]
|
||||
|
||||
|
||||
def is_line_in_ranges(line, ranges):
    """Return True when no ranges are given or the line is inside one of them."""
    if not ranges:
        return True
    return any(line in r for r in ranges)
|
||||
|
||||
|
||||
def safe_load(string):
    """
    Parse the provided string and return a dict.

    :param string: A string to be parsed.
    :returns: dict on success, None if parsing failed

    """
    try:
        return yaml.safe_load(string) or {}
    except yaml.YAMLError as e:
        # yaml.YAMLError also covers ParserError/ComposerError, which the
        # previous ScannerError-only handler let propagate uncaught.
        print(str(e))
        return None
|
||||
|
||||
|
||||
@contextlib.contextmanager
def open_file(filename, mode="r"):
    """
    Open the provided file safely and yield the file object.

    :param filename: A string containing an absolute path to the file to open.
    :param mode: A string describing the way in which the file will be used.
    :returns: file type

    """
    # Delegates closing to the inner `with`; kept as a contextmanager so
    # callers have a single helper for file access.
    with open(filename, mode) as stream:
        yield stream
|
||||
|
||||
|
||||
def add_dict_branch(tree, vector, value):
    """Insert *value* into nested dict *tree* along the key path *vector*."""
    key = vector[0]
    if len(vector) == 1:
        tree[key] = value
    else:
        # Recurse into the existing subtree (or a fresh dict) for the rest
        # of the key path.
        subtree = tree[key] if key in tree else {}
        tree[key] = add_dict_branch(subtree, vector[1:], value)
    return tree
|
||||
|
||||
|
||||
def sysexit(code=1):
    """Exit the process with the given status code (default 1)."""
    sys.exit(code)
|
||||
|
||||
|
||||
def sysexit_with_message(msg, code=1):
    """Log *msg* at critical level and exit with the given status code."""
    LOG.critical(msg)
    sysexit(code)
|
@ -0,0 +1,139 @@
|
||||
"""Abstracted methods to simplify role writeup."""
|
||||
|
||||
import codecs
|
||||
from collections import defaultdict
|
||||
|
||||
import yaml
|
||||
from yamllint import linter
|
||||
from yamllint.config import YamlLintConfig
|
||||
|
||||
from ansiblelater.command.candidates import Error
|
||||
from ansiblelater.exceptions import LaterAnsibleError
|
||||
from ansiblelater.exceptions import LaterError
|
||||
|
||||
from .yamlhelper import action_tasks
|
||||
from .yamlhelper import normalize_task
|
||||
from .yamlhelper import normalized_yaml
|
||||
from .yamlhelper import parse_yaml_linenumbers
|
||||
|
||||
|
||||
def get_tasks(candidate, settings):
    """
    Parse a candidate file into yaml lines annotated with line numbers.

    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict (unused, kept for the common helper signature)
    :returns: tuple of (parsed yaml data, list of Error objects)

    """
    errors = []
    # Initialize before the try block: if parsing raises, the previous code
    # hit UnboundLocalError at the return statement below.
    yamllines = []
    try:
        with codecs.open(candidate.path, mode="rb", encoding="utf-8") as f:
            yamllines = parse_yaml_linenumbers(f, candidate.path)

    except LaterError as ex:
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except LaterAnsibleError as e:
        errors.append(Error(e.line, "syntax error: %s" % (e.message)))

    return yamllines, errors
|
||||
|
||||
|
||||
def get_action_tasks(candidate, settings):
    """
    Extract all action tasks from a candidate file.

    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict (unused, kept for the common helper signature)
    :returns: tuple of (list of tasks, list of Error objects)

    """
    tasks = []
    errors = []
    try:
        with codecs.open(candidate.path, mode="rb", encoding="utf-8") as f:
            yamllines = parse_yaml_linenumbers(f, candidate.path)

        if yamllines:
            tasks = action_tasks(yamllines, candidate)
    except LaterError as ex:
        # LaterError wraps the original yaml error which carries the mark.
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except LaterAnsibleError as e:
        errors.append(Error(e.line, "syntax error: %s" % (e.message)))

    return tasks, errors
|
||||
|
||||
|
||||
def get_normalized_task(task, candidate, settings):
    """
    Normalize a single task with Ansible's module argument parser.

    :param task: task dict as produced by the yaml parser
    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict providing `ansible.custom_modules`
    :returns: tuple of (normalized task or None, list of Error objects)

    """
    normalized = None
    errors = []
    try:
        normalized = normalize_task(task, candidate.path, settings["ansible"]["custom_modules"])
    except LaterError as ex:
        # LaterError wraps the original yaml error which carries the mark.
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except LaterAnsibleError as e:
        errors.append(Error(e.line, "syntax error: %s" % (e.message)))

    return normalized, errors
|
||||
|
||||
|
||||
def get_normalized_tasks(candidate, settings):
    """
    Parse a candidate file and return all normalized, non-skipped tasks.

    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict providing `ansible.custom_modules`
    :returns: tuple of (list of normalized tasks, list of Error objects)

    """
    normalized = []
    errors = []
    try:
        with codecs.open(candidate.path, mode="rb", encoding="utf-8") as f:
            yamllines = parse_yaml_linenumbers(f, candidate.path)

        if yamllines:
            tasks = action_tasks(yamllines, candidate)
            for task in tasks:
                # An empty `tags` block causes `None` to be returned if
                # the `or []` is not present - `task.get("tags", [])`
                # does not suffice.
                if "skip_ansible_lint" in (task.get("tags") or []):
                    # No need to normalize_task if we are skipping it.
                    continue
                normalized.append(
                    normalize_task(task, candidate.path, settings["ansible"]["custom_modules"]))

    except LaterError as ex:
        # LaterError wraps the original yaml error which carries the mark.
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except LaterAnsibleError as e:
        errors.append(Error(e.line, "syntax error: %s" % (e.message)))

    return normalized, errors
|
||||
|
||||
|
||||
def get_normalized_yaml(candidate, settings, options=None):
    """
    Return the candidate's yaml lines with comments/markers/empties removed.

    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict (unused, kept for the common helper signature)
    :param options: optional dict of normalization flags; defaults to removing
                    empty lines and document markers
    :returns: tuple of (list of (lineno, line) tuples, list of Error objects)

    """
    errors = []
    # Initialize before the try block: if normalized_yaml raises, the previous
    # code hit UnboundLocalError at the return statement below.
    yamllines = []

    if not options:
        options = defaultdict(dict)
        options.update(remove_empty=True)
        options.update(remove_markers=True)

    try:
        yamllines = normalized_yaml(candidate.path, options)
    except LaterError as ex:
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except LaterAnsibleError as e:
        errors.append(Error(e.line, "syntax error: %s" % (e.message)))

    return yamllines, errors
|
||||
|
||||
|
||||
def get_raw_yaml(candidate, settings):
    """
    Load the candidate file content with plain yaml.safe_load.

    :param candidate: candidate object with a `path` attribute
    :param settings: settings dict (unused, kept for the common helper signature)
    :returns: tuple of (parsed content or None, list of Error objects)

    """
    content = None
    errors = []

    try:
        with codecs.open(candidate.path, mode="rb", encoding="utf-8") as f:
            content = yaml.safe_load(f)

    except LaterError as ex:
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))
    except yaml.YAMLError as e:
        # yaml.safe_load raises yaml errors directly, never LaterError, so
        # without this handler any syntax error escaped uncaught.
        # problem_mark/problem are not set on every YAMLError subclass.
        mark = getattr(e, "problem_mark", None)
        line = mark.line + 1 if mark else None
        errors.append(Error(line, "syntax error: %s" % (getattr(e, "problem", str(e)))))

    return content, errors
|
||||
|
||||
|
||||
def run_yamllint(path, options="extends: default"):
    """
    Run yamllint against a file and collect problems as Error objects.

    :param path: path of the file to lint
    :param options: yamllint configuration as a yaml string
    :returns: list of Error objects

    """
    errors = []
    try:
        with codecs.open(path, mode="rb", encoding="utf-8") as f:
            for problem in linter.run(f, YamlLintConfig(options)):
                errors.append(Error(problem.line, problem.desc))
    except LaterError as ex:
        # LaterError wraps the original yaml error which carries the mark.
        e = ex.original
        errors.append(Error(e.problem_mark.line + 1, "syntax error: %s" % (e.problem)))

    return errors
|
@ -0,0 +1,524 @@
|
||||
"""Utils for YAML file operations."""
|
||||
|
||||
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
|
||||
import codecs
|
||||
import glob
|
||||
import imp
|
||||
import inspect
|
||||
import os
|
||||
|
||||
import ansible.parsing.mod_args
|
||||
import six
|
||||
import yaml
|
||||
from ansible import constants
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.parsing.mod_args import ModuleArgsParser
|
||||
from ansible.parsing.yaml.constructor import AnsibleConstructor
|
||||
from ansible.parsing.yaml.loader import AnsibleLoader
|
||||
from ansible.template import Templar
|
||||
from yaml.composer import Composer
|
||||
|
||||
from ansiblelater.exceptions import LaterAnsibleError
|
||||
from ansiblelater.exceptions import LaterError
|
||||
|
||||
try:
|
||||
# Try to import the Ansible 2 module first, it's the future-proof one
|
||||
from ansible.parsing.splitter import split_args
|
||||
|
||||
except ImportError:
|
||||
# Fallback on the Ansible 1.9 module
|
||||
from ansible.module_utils.splitter import split_args
|
||||
|
||||
# ansible-later doesn't need/want to know about encrypted secrets, but it needs
|
||||
# Ansible 2.3+ allows encrypted secrets within yaml files, so we pass a string
|
||||
# as the password to enable such yaml files to be opened and parsed successfully.
|
||||
DEFAULT_VAULT_PASSWORD = "x"
|
||||
|
||||
|
||||
def parse_yaml_from_file(filepath):
    """
    Load a yaml file through Ansible's DataLoader.

    :param filepath: path of the yaml file to parse
    :returns: parsed python object as returned by DataLoader.load_from_file

    """
    dl = DataLoader()
    # Older Ansible versions configure vault via a setter; a dummy password
    # lets vault-encrypted files be opened without prompting.
    if hasattr(dl, "set_vault_password"):
        dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
    return dl.load_from_file(filepath)
|
||||
|
||||
|
||||
def path_dwim(basedir, given):
    """Resolve *given* to a path the way Ansible does, relative to *basedir*."""
    dl = DataLoader()
    dl.set_basedir(basedir)
    return dl.path_dwim(given)
|
||||
|
||||
|
||||
def ansible_template(basedir, varname, templatevars, **kwargs):
    """
    Render a Jinja2 expression with Ansible's Templar.

    :param basedir: base directory for the template lookup
    :param varname: expression/value to template
    :param templatevars: variables available during templating
    :returns: the templated value

    """
    dl = DataLoader()
    dl.set_basedir(basedir)
    templar = Templar(dl, variables=templatevars)
    return templar.template(varname, **kwargs)
|
||||
|
||||
|
||||
try:
|
||||
from ansible.plugins import module_loader
|
||||
except ImportError:
|
||||
from ansible.plugins.loader import module_loader
|
||||
|
||||
# Keys injected into parsed yaml nodes to track origin information.
LINE_NUMBER_KEY = "__line__"
FILENAME_KEY = "__file__"

# Task keywords recognized in addition to the module/action name.
# NOTE(review): "when" is listed twice; harmless but redundant.
VALID_KEYS = [
    "name", "action", "when", "async", "poll", "notify",
    "first_available_file", "include", "import_playbook",
    "tags", "register", "ignore_errors", "delegate_to",
    "local_action", "transport", "remote_user", "sudo",
    "sudo_user", "sudo_pass", "when", "connection", "environment", "args", "always_run",
    "any_errors_fatal", "changed_when", "failed_when", "check_mode", "delay",
    "retries", "until", "su", "su_user", "su_pass", "no_log", "run_once",
    "become", "become_user", "become_method", FILENAME_KEY,
]

# Maps a playbook section name to the action type annotated on its tasks.
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
    "tasks": "task",
    "handlers": "handler",
    "pre_tasks": "task",
    "post_tasks": "task",
    "block": "meta",
    "rescue": "meta",
    "always": "meta",
}
|
||||
|
||||
|
||||
def load_plugins(directory):
    """
    Import every plugin module in *directory* and instantiate its class.

    Each `[A-Za-z]*.py` file is expected to define a class named like the
    file itself.

    :param directory: directory to scan for plugin files
    :returns: list of plugin instances

    """
    result = []
    fh = None

    for pluginfile in glob.glob(os.path.join(directory, "[A-Za-z]*.py")):

        pluginname = os.path.basename(pluginfile.replace(".py", ""))
        try:
            # NOTE(review): `imp` is deprecated in python 3; kept here for the
            # python 2.7 compatibility this codebase still targets.
            fh, filename, desc = imp.find_module(pluginname, [directory])
            mod = imp.load_module(pluginname, fh, filename, desc)
            obj = getattr(mod, pluginname)()
            result.append(obj)
        finally:
            # Close the file handle imp.find_module opened, even on failure.
            if fh:
                fh.close()
    return result
|
||||
|
||||
|
||||
def tokenize(line):
    """Split an old-style task line into (command, args, kwargs)."""
    tokens = line.lstrip().split(" ")
    # Strip a leading list dash and an action:/local_action: prefix.
    if tokens[0] == "-":
        tokens = tokens[1:]
    if tokens[0] == "action:" or tokens[0] == "local_action:":
        tokens = tokens[1:]
    command = tokens[0].replace(":", "")

    args = []
    kwargs = {}
    seen_positional = False
    for token in tokens[1:]:
        # key=value pairs are only recognized before the first positional
        # argument; afterwards everything is treated as positional.
        if "=" in token and not seen_positional:
            key, _, val = token.partition("=")
            kwargs[key] = val
        else:
            seen_positional = True
            args.append(token)
    return (command, args, kwargs)
|
||||
|
||||
|
||||
def _playbook_items(pb_data):
|
||||
if isinstance(pb_data, dict):
|
||||
return pb_data.items()
|
||||
elif not pb_data:
|
||||
return []
|
||||
else:
|
||||
return [item for play in pb_data for item in play.items()]
|
||||
|
||||
|
||||
def find_children(playbook, playbook_dir):
    """
    Resolve the files referenced by a playbook/role tuple.

    :param playbook: tuple of (path, type) where type is e.g. "role" or "playbook"
    :param playbook_dir: directory of the top level playbook
    :returns: list of {"path": ..., "type": ...} dicts

    """
    if not os.path.exists(playbook[0]):
        return []
    if playbook[1] == "role":
        # Synthesize a minimal play so roles are traversed like a playbook.
        playbook_ds = {"roles": [{"role": playbook[0]}]}
    else:
        try:
            playbook_ds = parse_yaml_from_file(playbook[0])
        except AnsibleError as e:
            raise SystemExit(str(e))
    results = []
    basedir = os.path.dirname(playbook[0])
    items = _playbook_items(playbook_ds)
    for item in items:
        for child in play_children(basedir, item, playbook[1], playbook_dir):
            # Skip paths containing unresolved variables/templates.
            if "$" in child["path"] or "{{" in child["path"]:
                continue
            # Keep only the path tokens before the first key=value argument.
            valid_tokens = list()
            for token in split_args(child["path"]):
                if "=" in token:
                    break
                valid_tokens.append(token)
            path = " ".join(valid_tokens)
            results.append({
                "path": path_dwim(basedir, path),
                "type": child["type"]
            })
    return results
|
||||
|
||||
|
||||
def template(basedir, value, variables, fail_on_undefined=False, **kwargs):
    """Template *value* with Ansible, returning it unchanged on failure."""
    try:
        value = ansible_template(os.path.abspath(basedir), value, variables,
                                 **dict(kwargs, fail_on_undefined=fail_on_undefined))
    # Hack to skip the following exception when using to_json filter on a variable.
    # I guess the filter doesn't like empty vars...
    except (AnsibleError, ValueError):
        # templating failed, so just keep value as is.
        pass
    return value
|
||||
|
||||
|
||||
def play_children(basedir, item, parent_type, playbook_dir):
    """
    Dispatch a play item (key, value) to the matching child resolver.

    :param basedir: directory of the file containing the item
    :param item: (key, value) tuple from the play
    :param parent_type: type of the containing file (e.g. "playbook", "tasks")
    :param playbook_dir: directory of the top level playbook (unused here)
    :returns: list of {"path": ..., "type": ...} dicts

    """
    # Each recognized section key delegates to its resolver.
    delegate_map = {
        "tasks": _taskshandlers_children,
        "pre_tasks": _taskshandlers_children,
        "post_tasks": _taskshandlers_children,
        "block": _taskshandlers_children,
        "include": _include_children,
        "import_playbook": _include_children,
        "roles": _roles_children,
        "dependencies": _roles_children,
        "handlers": _taskshandlers_children,
    }
    (k, v) = item
    # Make a play-local `library` directory available as module search path.
    play_library = os.path.join(os.path.abspath(basedir), "library")
    _load_library_if_exists(play_library)

    if k in delegate_map:
        if v:
            # Template the value first so variable-based includes resolve.
            v = template(os.path.abspath(basedir),
                         v,
                         dict(playbook_dir=os.path.abspath(basedir)),
                         fail_on_undefined=False)
        return delegate_map[k](basedir, k, v, parent_type)
    return []
|
||||
|
||||
|
||||
def _include_children(basedir, k, v, parent_type):
    """Resolve the file referenced by an include/import_playbook entry."""
    # handle include: filename.yml tags=blah
    (command, args, kwargs) = tokenize("{0}: {1}".format(k, v))

    result = path_dwim(basedir, args[0])
    # Fall back to the sibling tasks directory when the resolved path does
    # not exist (e.g. an include from within a role subdirectory).
    if not os.path.exists(result) and not basedir.endswith("tasks"):
        result = path_dwim(os.path.join(basedir, "..", "tasks"), v)
    return [{"path": result, "type": parent_type}]
|
||||
|
||||
|
||||
def _taskshandlers_children(basedir, k, v, parent_type):
    """Resolve child files referenced from a tasks/handlers/block list."""
    results = []
    for th in v:
        # Plain file includes/imports append a single child entry.
        if "include" in th:
            append_children(th["include"], basedir, k, parent_type, results)
        elif "include_tasks" in th:
            append_children(th["include_tasks"], basedir, k, parent_type, results)
        elif "import_playbook" in th:
            append_children(th["import_playbook"], basedir, k, parent_type, results)
        elif "import_tasks" in th:
            append_children(th["import_tasks"], basedir, k, parent_type, results)
        # Role includes/imports resolve through the role file lookup,
        # honoring a custom entry point via tasks_from.
        elif "import_role" in th:
            results.extend(_roles_children(basedir, k, [th["import_role"].get("name")], parent_type,
                                           main=th["import_role"].get("tasks_from", "main")))
        elif "include_role" in th:
            results.extend(_roles_children(basedir, k, [th["include_role"].get("name")],
                                           parent_type,
                                           main=th["include_role"].get("tasks_from", "main")))
        # Blocks recurse into their block/rescue/always sections.
        elif "block" in th:
            results.extend(_taskshandlers_children(basedir, k, th["block"], parent_type))
            if "rescue" in th:
                results.extend(_taskshandlers_children(basedir, k, th["rescue"], parent_type))
            if "always" in th:
                results.extend(_taskshandlers_children(basedir, k, th["always"], parent_type))
    return results
|
||||
|
||||
|
||||
def append_children(taskhandler, basedir, k, parent_type, results):
    """Append a resolved child entry for a tasks/handlers include to *results*."""
    # when taskshandlers_children is called for playbooks, the
    # actual type of the included tasks is the section containing the
    # include, e.g. tasks, pre_tasks, or handlers.
    if parent_type == "playbook":
        playbook_section = k
    else:
        playbook_section = parent_type
    results.append({
        "path": path_dwim(basedir, taskhandler),
        "type": playbook_section
    })
|
||||
|
||||
|
||||
def _roles_children(basedir, k, v, parent_type, main="main"):
    """
    Resolve the task/handler/meta files of every role in a roles list.

    :param v: list of role names or role dicts
    :param main: entry point file name (from tasks_from), defaults to "main"
    :returns: list of {"path": ..., "type": ...} dicts

    """
    results = []
    for role in v:
        if isinstance(role, dict):
            if "role" in role or "name" in role:
                # Roles tagged skip_ansible_later are deliberately ignored.
                if "tags" not in role or "skip_ansible_later" not in role["tags"]:
                    results.extend(_look_for_role_files(basedir,
                                                        role.get("role", role.get("name")),
                                                        main=main))
            else:
                raise SystemExit("role dict {0} does not contain a 'role' "
                                 "or 'name' key".format(role))
        else:
            results.extend(_look_for_role_files(basedir, role, main=main))
    return results
|
||||
|
||||
|
||||
def _load_library_if_exists(path):
    """Register *path* as an Ansible module directory if it exists."""
    if os.path.exists(path):
        module_loader.add_directory(path)
|
||||
|
||||
|
||||
def _rolepath(basedir, role):
    """
    Locate the directory of *role*, searching standard and configured paths.

    :param basedir: directory the role was referenced from
    :param role: role name
    :returns: role directory path or None when not found

    """
    role_path = None

    possible_paths = [
        # if included from a playbook
        path_dwim(basedir, os.path.join("roles", role)),
        path_dwim(basedir, role),
        # if included from roles/[role]/meta/main.yml
        path_dwim(
            basedir, os.path.join("..", "..", "..", "roles", role)
        ),
        path_dwim(basedir, os.path.join("..", "..", role))
    ]

    # Also honor the roles search path from the Ansible configuration.
    if constants.DEFAULT_ROLES_PATH:
        search_locations = constants.DEFAULT_ROLES_PATH
        if isinstance(search_locations, six.string_types):
            search_locations = search_locations.split(os.pathsep)
        for loc in search_locations:
            loc = os.path.expanduser(loc)
            possible_paths.append(path_dwim(loc, role))

    # First existing directory wins.
    for path_option in possible_paths:
        if os.path.isdir(path_option):
            role_path = path_option
            break

    if role_path:
        # Make the role's bundled modules resolvable.
        _load_library_if_exists(os.path.join(role_path, "library"))

    return role_path
|
||||
|
||||
|
||||
def _look_for_role_files(basedir, role, main="main"):
    """
    Collect the entry point files (tasks/handlers/meta) of a role.

    :param basedir: directory the role was referenced from
    :param role: role name
    :param main: entry point base name, defaults to "main"
    :returns: list of {"path": ..., "type": ...} dicts

    """
    role_path = _rolepath(basedir, role)
    if not role_path:
        return []

    results = []

    for th in ["tasks", "handlers", "meta"]:
        # Accept either .yml or .yaml, preferring .yml.
        for ext in (".yml", ".yaml"):
            thpath = os.path.join(role_path, th, main + ext)
            if os.path.exists(thpath):
                results.append({"path": thpath, "type": th})
                break
    return results
|
||||
|
||||
|
||||
def rolename(filepath):
    """
    Extract the role name from a path containing "roles/".

    :param filepath: file path possibly inside a role
    :returns: the role name, or "" when the path is not inside a role

    """
    idx = filepath.find("roles/")
    if idx < 0:
        return ""
    role = filepath[idx + 6:]
    sep = role.find("/")
    # A path ending at the role directory itself ("roles/foo") has no
    # trailing slash; the previous `role[:role.find("/")]` sliced with -1
    # and silently chopped the last character of the name.
    if sep < 0:
        return role
    return role[:sep]
|
||||
|
||||
|
||||
def _kv_to_dict(v):
    """Tokenize a key=value action string into a normalized action dict."""
    (command, args, kwargs) = tokenize(v)
    # NOTE(review): duplicate keys between the fixed keywords and kwargs would
    # raise TypeError here; presumably tokenize never yields them — verify.
    return (dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs))
|
||||
|
||||
|
||||
def normalize_task(task, filename, custom_modules=None):
    """
    Ensure tasks have an action key and strings are converted to python objects.

    :param task: raw task dict as returned by the yaml parser
    :param filename: origin file recorded under FILENAME_KEY
    :param custom_modules: optional list of additional module names to accept
    :returns: normalized task dict
    :raises LaterAnsibleError: when Ansible cannot parse the task

    """
    # Avoid a mutable `[]` default argument; None is the conventional sentinel.
    if custom_modules is None:
        custom_modules = []

    ansible_action_type = task.get("__ansible_action_type__", "task")
    ansible_action_meta = task.get("__ansible_action_meta__", dict())
    if "__ansible_action_type__" in task:
        del(task["__ansible_action_type__"])

    normalized = dict()
    # TODO: Workaround for custom modules
    # NOTE(review): this mutates the module-global BUILTIN_TASKS set so
    # ModuleArgsParser accepts the configured custom modules.
    builtin = list(ansible.parsing.mod_args.BUILTIN_TASKS)
    builtin = list(set(builtin + custom_modules))
    ansible.parsing.mod_args.BUILTIN_TASKS = frozenset(builtin)
    mod_arg_parser = ModuleArgsParser(task)
    try:
        action, arguments, normalized["delegate_to"] = mod_arg_parser.parse()
    except AnsibleParserError as e:
        raise LaterAnsibleError("syntax error", e)

    # denormalize shell -> command conversion
    if "_uses_shell" in arguments:
        action = "shell"
        del(arguments["_uses_shell"])

    for (k, v) in list(task.items()):
        if k in ("action", "local_action", "args", "delegate_to") or k == action:
            # we don"t want to re-assign these values, which were
            # determined by the ModuleArgsParser() above
            continue
        else:
            normalized[k] = v

    normalized["action"] = dict(__ansible_module__=action)

    if "_raw_params" in arguments:
        normalized["action"]["__ansible_arguments__"] = arguments["_raw_params"].split(" ")
        del(arguments["_raw_params"])
    else:
        normalized["action"]["__ansible_arguments__"] = list()
    normalized["action"].update(arguments)

    normalized[FILENAME_KEY] = filename
    normalized["__ansible_action_type__"] = ansible_action_type
    normalized["__ansible_action_meta__"] = ansible_action_meta
    return normalized
|
||||
|
||||
|
||||
def action_tasks(yaml, file):
    """
    Extract the flat list of action tasks from parsed yaml data.

    :param yaml: parsed yaml data (note: parameter shadows the yaml module)
    :param file: candidate-like dict providing a "filetype" key
    :returns: list of task dicts without include/import entries

    """
    tasks = list()
    if file["filetype"] in ["tasks", "handlers"]:
        tasks = add_action_type(yaml, file["filetype"])
    else:
        tasks.extend(extract_from_list(yaml, ["tasks", "handlers", "pre_tasks", "post_tasks"]))

    # Add sub-elements of block/rescue/always to tasks list
    tasks.extend(extract_from_list(tasks, ["block", "rescue", "always"]))
    # Remove block/rescue/always elements from tasks list
    block_rescue_always = ("block", "rescue", "always")
    tasks[:] = [task for task in tasks if all(k not in task for k in block_rescue_always)]

    # Include/import entries reference other files and are not tasks here.
    return [task for task in tasks if set(
        ["include", "include_tasks", "import_playbook", "import_tasks"]).isdisjoint(task.keys())]
|
||||
|
||||
|
||||
def task_to_str(task):
    """Render a normalized task as a short human readable string."""
    name = task.get("name")
    if name:
        return name
    action = task.get("action")
    # Internal bookkeeping keys are not part of the printable arguments.
    internal_keys = ("__ansible_module__", "__ansible_arguments__")
    pieces = [u"{0}={1}".format(k, v)
              for (k, v) in action.items() if k not in internal_keys]
    pieces = pieces + action.get("__ansible_arguments__")
    return u"{0} {1}".format(action["__ansible_module__"], " ".join(pieces))
|
||||
|
||||
|
||||
def extract_from_list(blocks, candidates):
    """
    Collect and annotate the list values stored under *candidates* keys.

    :param blocks: list of dicts (plays/tasks) to inspect
    :param candidates: key names whose list values should be extracted
    :returns: list of extracted entries annotated with their action type
    :raises RuntimeError: when a candidate key holds a non-list, non-None value

    """
    results = list()
    for block in blocks:
        for candidate in candidates:
            # Keys stripped from the meta data attached to extracted entries.
            delete_meta_keys = [candidate, "__line__", "__file__", "__ansible_action_type__"]
            if isinstance(block, dict) and candidate in block:
                if isinstance(block[candidate], list):
                    # Pass the surrounding block (minus bookkeeping keys) as
                    # meta data to the extracted children.
                    meta_data = dict(block)
                    for key in delete_meta_keys:
                        meta_data.pop(key, None)
                    results.extend(add_action_type(block[candidate], candidate, meta_data))
                elif block[candidate] is not None:
                    raise RuntimeError(
                        "Key '%s' defined, but bad value: '%s'" %
                        (candidate, str(block[candidate])))
    return results
|
||||
|
||||
|
||||
def add_action_type(actions, action_type, action_meta=None):
    """
    Annotate each action dict with its action type (mutates in place).

    :param actions: list of action/task dicts
    :param action_type: section name mapped via BLOCK_NAME_TO_ACTION_TYPE_MAP
    :param action_meta: optional meta data dict attached to every action
    :returns: the same action dicts, collected into a new list

    """
    results = list()
    for action in actions:
        action["__ansible_action_type__"] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
        if action_meta:
            action["__ansible_action_meta__"] = action_meta
        results.append(action)
    return results
|
||||
|
||||
|
||||
def parse_yaml_linenumbers(data, filename):
    """
    Parse yaml as ansible.utils.parse_yaml but with linenumbers.

    The line numbers are stored in each node's LINE_NUMBER_KEY key.

    :param data: file object or string with the yaml content
    :param filename: origin recorded under FILENAME_KEY in every mapping
    :raises LaterError: on any yaml parse/scan/compose error

    """
    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
        if hasattr(node, "__line__"):
            mapping[LINE_NUMBER_KEY] = node.__line__
        else:
            # Fall back to the line number Ansible tracked itself.
            mapping[LINE_NUMBER_KEY] = mapping._line_number
        mapping[FILENAME_KEY] = filename
        return mapping

    try:
        kwargs = {}
        # Older AnsibleLoader signatures take the vault password directly.
        if "vault_password" in inspect.getargspec(AnsibleLoader.__init__).args:
            kwargs["vault_password"] = DEFAULT_VAULT_PASSWORD
        loader = AnsibleLoader(data, **kwargs)
        # Monkey-patch the loader so every composed node/mapping records its
        # source line and file.
        loader.compose_node = compose_node
        loader.construct_mapping = construct_mapping
        data = loader.get_single_data()
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise LaterError("syntax error", e)
    except (yaml.composer.ComposerError) as e:
        # Composer errors split the message over context/problem; merge them.
        e.problem = "{} {}".format(e.context, e.problem)
        raise LaterError("syntax error", e)
    return data
|
||||
|
||||
|
||||
def normalized_yaml(file, options):
    """Read a yaml file and return its enumerated lines with noise filtered out."""
    try:
        with codecs.open(file, mode="rb", encoding="utf-8") as f:
            numbered = list(enumerate(f.readlines(), start=1))

        kept = []
        for entry in numbered:
            stripped = entry[1].strip()
            # Comment-only lines are always dropped.
            if stripped.startswith("#"):
                continue
            # Optionally drop the document start marker.
            if options.get("remove_markers") and stripped == "---":
                continue
            # Optionally drop empty lines.
            if options.get("remove_empty") and not stripped:
                continue
            kept.append(entry)
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise LaterError("syntax error", e)
    return kept
|
Loading…
Reference in New Issue
Block a user