From 660afb539228b60734e4f24cc45bf16430f10c5e Mon Sep 17 00:00:00 2001 From: Robert Kaussow Date: Tue, 18 Aug 2020 23:44:49 +0200 Subject: [PATCH] add custom modules --- .flake8 | 2 +- plugins/inventory/proxmox.py | 2 +- plugins/modules/corenetworks_dns.py | 256 +++++ plugins/modules/corenetworks_token.py | 112 +++ plugins/modules/iptables_raw.py | 1070 ++++++++++++++++++++ plugins/modules/openssl_pkcs12.py | 415 ++++++++ plugins/modules/proxmox_kvm.py | 1335 +++++++++++++++++++++++++ plugins/modules/ucr.py | 116 +++ setup.cfg | 2 +- test/unit/requirements.txt | 8 +- 10 files changed, 3314 insertions(+), 4 deletions(-) create mode 100644 plugins/modules/corenetworks_dns.py create mode 100644 plugins/modules/corenetworks_token.py create mode 100644 plugins/modules/iptables_raw.py create mode 100644 plugins/modules/openssl_pkcs12.py create mode 100644 plugins/modules/proxmox_kvm.py create mode 100644 plugins/modules/ucr.py diff --git a/.flake8 b/.flake8 index af539aa..88ea07a 100644 --- a/.flake8 +++ b/.flake8 @@ -1,5 +1,5 @@ [flake8] -ignore = D101, D102, D103, D107, D202, E402, W503 +ignore = D101, D102, D103, D105, D107, D202, E402, W503 max-line-length = 99 inline-quotes = double exclude = diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 0ea5b00..237944a 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -1,9 +1,9 @@ -"""Dynamic inventory plugin for Proxmox VE.""" # -*- coding: utf-8 -*- # Copyright (c) 2014, Mathieu GAUTHIER-LAFAYE # Copyright (c) 2016, Matt Harris # Copyright (c) 2020, Robert Kaussow # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Dynamic inventory plugin for Proxmox VE.""" from __future__ import absolute_import, division, print_function diff --git a/plugins/modules/corenetworks_dns.py b/plugins/modules/corenetworks_dns.py new file mode 100644 index 0000000..63ccf71 --- /dev/null +++ b/plugins/modules/corenetworks_dns.py @@ -0,0 +1,256 @@ 
+# -*- coding: utf-8 -*-
+"""Module to control corenetworks DNS API."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"}
+
+DOCUMENTATION = r"""
+---
+module: corenetworks_dns
+short_description: Interface with the DNS API of core-networks.de
+description:
+  - "Manages DNS zones and records via the core networks API, see the docs: U(https://beta.api.core-networks.de/doc/)."
+options:
+  api_user:
+    description:
+      - Account API username. If omitted, the environment variables C(CN_API_USER) and C(CN_API_PASSWORD) will be looked for.
+      - You should prefer to use `api_token` or the `corenetworks_token` module to create one to prevent running into rate limits.
+    type: str
+  api_password:
+    description:
+      - Account API password.
+    type: str
+  api_token:
+    description:
+      - Account API token.
+    type: str
+  zone:
+    description:
+      - The name of the Zone to work with (e.g. "example.com").
+      - The Zone must already exist.
+    type: str
+    required: true
+    aliases: [ domain ]
+  record:
+    description:
+      - Used record relative to the given zone.
+      - Default is C(@) (e.g. the zone name).
+    type: str
+    default: "@"
+    aliases: [ name ]
+  type:
+    description:
+      - The type of DNS record to create.
+    choices: [ "A", "ALIAS", "CNAME", "MX", "SPF", "URL", "TXT", "NS", "SRV", "NAPTR", "PTR", "AAAA", "SSHFP", "HINFO", "POOL" ]
+    type: str
+  ttl:
+    description:
+      - The TTL to give the new record in seconds.
+    default: 3600
+    type: int
+  value:
+    description:
+      - Record value.
+      - Must be specified when trying to ensure a record exists.
+    type: str
+  solo:
+    description:
+      - Whether the record should be the only one for that record type and record name.
+      - Only use with C(state=present).
+      - This will delete all other records with the same record name and type.
+ type: bool + state: + description: + - whether the record should exist or not + choices: [ "present", "absent" ] + default: present + type: str +requirements: + - "corenetworks >= 0.1.4" +author: "Robert Kaussow (@xoxys)" +""" # noqa + +EXAMPLES = """ +- name: Create a test.my.com A record to point to 127.0.0.1 + corenetworks_dns: + zone: my.com + record: test + type: A + value: 127.0.0.1 + delegate_to: localhost + register: record + +- name: Create a my.com CNAME record to example.com + corenetworks_dns: + zone: my.com + type: CNAME + value: example.com + state: present + delegate_to: localhost + +- name: Change TTL value for a record + corenetworks_dns: + zone: my.com + type: CNAME + value: example.com + ttl: 600 + state: present + delegate_to: localhost + +- name: Delete the record + corenetworks_dns: + zone: my.com + type: CNAME + value: example.com + state: absent + delegate_to: localhost +""" + +RETURN = r"""# """ + +import copy +import traceback + +CORENETWORKS_IMP_ERR = None +try: + from corenetworks import CoreNetworks + from corenetworks.exceptions import CoreNetworksException + HAS_CORENETWORKS = True +except ImportError: + CORENETWORKS_IMP_ERR = traceback.format_exc() + HAS_CORENETWORKS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def delete_records(client, module, zone, params, is_solo=False): + changed = False + + search = copy.deepcopy(params) + if is_solo: + search.pop("data", None) + search.pop("ttl", None) + + records = client.records(zone, params=search) + + for r in records: + r["ttl"] = int(r["ttl"]) + + if is_solo: + if not (r["data"] == params["data"] and r["ttl"] == params["ttl"]): + changed = True + if not module.check_mode: + client.delete_record(zone, r) + else: + changed = True + if not module.check_mode: + client.delete_record(zone, r) + + return changed + + +def add_record(client, module, zone, params): + changed = False + result = [] + records = client.records(zone, params=params) + + if 
len(records) > 1: + module.fail_json( + msg="More than one record already exists for the given attributes. " + "That should be impossible, please open an issue!" + ) + + if len(records) == 0: + changed = True + if not module.check_mode: + result = client.add_record(zone, params=params) + + return result, changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_user=dict(type="str"), + api_password=dict(type="str", no_log=True), + api_token=dict(type="str", no_log=True), + zone=dict(type="str", required=True, aliases=["domain"]), + record=dict(type="str", default="@", aliases=["name"]), + type=dict( + type="str", + choices=[ + "A", "ALIAS", "CNAME", "MX", "SPF", "URL", "TXT", "NS", "SRV", "NAPTR", "PTR", + "AAAA", "SSHFP", "HINFO", "POOL" + ] + ), + ttl=dict(type="int", default=3600), + value=dict(type="str"), + solo=dict(type="bool", default=False), + state=dict(type="str", choices=["present", "absent"], default="present"), + ), + required_together=[["record", "value"]], + supports_check_mode=True, + ) + + if not HAS_CORENETWORKS: + module.fail_json(msg=missing_required_lib("corenetworks"), exception=CORENETWORKS_IMP_ERR) + + api_user = module.params.get("api_user") + api_password = module.params.get("api_password") + api_token = module.params.get("api_token") + zone = module.params.get("zone") + record = module.params.get("record") + record_type = module.params.get("type") + ttl = module.params.get("ttl") + value = module.params.get("value") + state = module.params.get("state") + is_solo = module.params.get("solo") + params = {"name": record, "ttl": ttl} + + # sanity checks + if not record_type: + if state == "present": + module.fail_json(msg="Missing the record type") + else: + params["type"] = record_type + + if not value: + if state == "present": + module.fail_json(msg="Missing the record value") + else: + params["data"] = value + + if is_solo and state == "absent": + module.fail_json(msg="solo=true can only be used with state=present") + 
+
+    # perform actions
+    try:
+        # request throttling to workaround the current rate limit
+        changed = False
+        if api_token:
+            client = CoreNetworks(api_token=api_token, auto_commit=True)
+        else:
+            client = CoreNetworks(user=api_user, password=api_password, auto_commit=True)
+
+        if state == "present":
+            changed_solo = False
+            if is_solo:
+                changed_solo = delete_records(client, module, zone, params, is_solo=True)
+            result, changed = add_record(client, module, zone, params)
+
+            module.exit_json(changed=changed_solo + changed, result=result)
+        # state is absent
+        else:
+            changed = delete_records(client, module, zone, params)
+            module.exit_json(changed=changed)
+
+    except CoreNetworksException as e:
+        module.fail_json(msg="Failure in core networks API communication: {}".format(str(e)))
+
+    module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/corenetworks_token.py b/plugins/modules/corenetworks_token.py
new file mode 100644
index 0000000..eaaf85e
--- /dev/null
+++ b/plugins/modules/corenetworks_token.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""Module to obtain API tokens for the corenetworks DNS API."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"}
+
+DOCUMENTATION = """
+---
+module: corenetworks_token
+short_description: Obtain API tokens for the DNS API of core-networks.de
+description:
+  - "Obtains API tokens for the core networks API, see the docs: U(https://beta.api.core-networks.de/doc/)."
+options:
+  api_user:
+    description:
+      - Account API username. If omitted, the environment variables C(CN_API_USER) and C(CN_API_PASSWORD) will be looked for.
+    type: str
+  api_password:
+    description:
+      - Account API password.
+ type: str + state: + description: + - whether the record should exist or not + choices: [ "present" ] + default: present + type: str +requirements: + - "corenetworks >= 0.1.3" +author: "Robert Kaussow (@xoxys)" +""" # noqa + +EXAMPLES = """ +- name: Obtain an API token using env variables + corenetworks_token: + delegate_to: localhost + register: my_token + +- name: Obtain an API token using username and password attribute + corenetworks_token: + api_user: testuser + api_password: secure + delegate_to: localhost + register: my_token + +- debug: + msg: "{{ my_token }}" + +- name: Use the token + corenetworks_dns: + api_token: "{{ my_token.session.token }}" + zone: my.com + type: CNAME + value: example.com + state: present + delegate_to: localhost +""" + +RETURN = r"""# """ + +import traceback + +CORENETWORKS_IMP_ERR = None +try: + from corenetworks import CoreNetworks + from corenetworks.exceptions import CoreNetworksException + HAS_CORENETWORKS = True +except ImportError: + CORENETWORKS_IMP_ERR = traceback.format_exc() + HAS_CORENETWORKS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_user=dict(type="str"), + api_password=dict(type="str", no_log=True), + state=dict(type="str", choices=["present"], default="present"), + ), + supports_check_mode=True, + ) + + if not HAS_CORENETWORKS: + module.fail_json(msg=missing_required_lib("corenetworks"), exception=CORENETWORKS_IMP_ERR) + + api_user = module.params.get("api_user") + api_password = module.params.get("api_password") + + # perform actions + try: + # request throtteling to workaround the current rate limit + changed = False + client = CoreNetworks(user=api_user, password=api_password, auto_commit=True) + + session = {"token": client._auth.token} + + if hasattr(client._auth, "expires"): + session["expires"] = client._auth.expires.strftime("%Y-%m-%d, %H:%M:%S") + + module.exit_json(changed=changed, 
session=session) + + except CoreNetworksException as e: + module.fail_json(msg="Failure in core networks API communication: {}".format(str(e))) + + module.fail_json(msg="Unknown what you wanted me to do") + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/iptables_raw.py b/plugins/modules/iptables_raw.py new file mode 100644 index 0000000..49102de --- /dev/null +++ b/plugins/modules/iptables_raw.py @@ -0,0 +1,1070 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2016, Strahinja Kustudic +# Copyright (c) 2016, Damir Markovic +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""IPtables raw module.""" + +ANSIBLE_METADATA = {"status": ["preview"], "supported_by": "community", "metadata_version": "1.0"} + +DOCUMENTATION = """ +--- +module: iptables_raw +short_description: Manage iptables rules +version_added: "2.4" +description: + - Add/remove iptables rules while keeping state. +options: + backup: + description: + - Create a backup of the iptables state file before overwriting it. + required: false + choices: ["yes", "no"] + default: "no" + ipversion: + description: + - Target the IP version this rule is for. + required: false + default: "4" + choices: ["4", "6"] + keep_unmanaged: + description: + - If set to C(yes) keeps active iptables (unmanaged) rules for the target + C(table) and gives them C(weight=90). This means these rules will be + ordered after most of the rules, since default priority is 40, so they + shouldn"t be able to block any allow rules. If set to C(no) deletes all + rules which are not set by this module. + - "WARNING: Be very careful when running C(keep_unmanaged=no) for the + first time, since if you don"t specify correct rules, you can block + yourself out of the managed host." + required: false + choices: ["yes", "no"] + default: "yes" + name: + description: + - Name that will be used as an identifier for these rules. 
It can contain + alphanumeric characters, underscore, hyphen, or a space; has to be + UNIQUE for a specified C(table). You can also pass C(name=*) with + C(state=absent) to flush all rules in the selected table, or even all + tables with C(table=*). + required: true + rules: + description: + - The rules that we want to add. Accepts multiline values. + - "Note: You can only use C(-A)/C(--append), C(-N)/C(--new-chain), and + C(-P)/C(--policy) to specify rules." + required: false + state: + description: + - The state this rules fragment should be in. + choices: ["present", "absent"] + required: false + default: present + table: + description: + - The table this rule applies to. You can specify C(table=*) only with + with C(name=*) and C(state=absent) to flush all rules in all tables. + choices: ["filter", "nat", "mangle", "raw", "security", "*"] + required: false + default: filter + weight: + description: + - Determines the order of the rules. Lower C(weight) means higher + priority. Supported range is C(0 - 99) + choices: ["0 - 99"] + required: false + default: 40 +notes: + - Requires C(iptables) package. Debian-based distributions additionally + require C(iptables-persistent). + - "Depending on the distribution, iptables rules are saved in different + locations, so that they can be loaded on boot. Red Hat distributions (RHEL, + CentOS, etc): C(/etc/sysconfig/iptables) and C(/etc/sysconfig/ip6tables); + Debian distributions (Debian, Ubuntu, etc): C(/etc/iptables/rules.v4) and + C(/etc/iptables/rules.v6); other distributions: C(/etc/sysconfig/iptables) + and C(/etc/sysconfig/ip6tables)." + - This module saves state in C(/etc/ansible-iptables) directory, so don"t + modify this directory! 
+author: + - "Strahinja Kustudic (@kustodian)" + - "Damir Markovic (@damirda)" +""" + +EXAMPLES = """ +# Allow all IPv4 traffic coming in on port 80 (http) +- iptables_raw: + name: allow_tcp_80 + rules: "-A INPUT -p tcp -m tcp --dport 80 -j ACCEPT" + +# Set default rules with weight 10 and disregard all unmanaged rules +- iptables_raw: + name: default_rules + weight: 10 + keep_unmanaged: no + rules: | + -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT + -A INPUT -i lo -j ACCEPT + -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT + -P INPUT DROP + -P FORWARD DROP + -P OUTPUT ACCEPT + +# Allow all IPv6 traffic coming in on port 443 (https) with weight 50 +- iptables_raw: + ipversion: 6 + weight: 50 + name: allow_tcp_443 + rules: "-A INPUT -p tcp -m tcp --dport 443 -j ACCEPT" + +# Remove the above rule +- iptables_raw: + state: absent + ipversion: 6 + name: allow_tcp_443 + +# Define rules with a custom chain +- iptables_raw: + name: custom1_rules + rules: | + -N CUSTOM1 + -A CUSTOM1 -s 192.168.0.0/24 -j ACCEPT + +# Reset all IPv4 iptables rules in all tables and allow all traffic +- iptables_raw: + name: "*" + table: "*" + state: absent +""" + +RETURN = """ +state: + description: state of the rules + returned: success + type: string + sample: present +name: + description: name of the rules + returned: success + type: string + sample: open_tcp_80 +weight: + description: weight of the rules + returned: success + type: int + sample: 40 +ipversion: + description: IP version of iptables used + returned: success + type: int + sample: 6 +rules: + description: passed rules + returned: success + type: string + sample: "-A INPUT -p tcp -m tcp --dport 80 -j ACCEPT" +table: + description: iptables table used + returned: success + type: string + sample: filter +backup: + description: if the iptables file should backed up + returned: success + type: boolean + sample: False +keep_unmanaged: + description: if it should keep unmanaged rules + returned: success + 
type: boolean + sample: True +""" + +import fcntl +import os +import re +import shlex +import tempfile +import time +from collections import defaultdict + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import json + + +# Genereates a diff dictionary from an old and new table dump. +def generate_diff(dump_old, dump_new): + diff = dict() + if dump_old != dump_new: + diff["before"] = dump_old + diff["after"] = dump_new + return diff + + +def compare_dictionaries(dict1, dict2): + if dict1 is None or dict2 is None: + return False + if not (isinstance(dict1, dict) and isinstance(dict2, dict)): + return False + shared_keys = set(dict2.keys()) & set(dict2.keys()) + if not (len(shared_keys) == len(dict1.keys()) and len(shared_keys) == len(dict2.keys())): + return False + dicts_are_equal = True + for key in dict1.keys(): + if isinstance(dict1[key], dict): + dicts_are_equal = dicts_are_equal and compare_dictionaries(dict1[key], dict2[key]) + else: + dicts_are_equal = dicts_are_equal and (dict1[key] == dict2[key]) + if not dicts_are_equal: + break + return dicts_are_equal + + +class Iptables: + + # Default chains for each table + DEFAULT_CHAINS = { + "filter": ["INPUT", "FORWARD", "OUTPUT"], + "raw": ["PREROUTING", "OUTPUT"], + "nat": ["PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"], + "mangle": ["PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"], + "security": ["INPUT", "FORWARD", "OUTPUT"] + } + + # List of tables + TABLES = list(DEFAULT_CHAINS.copy().keys()) + + # Directory which will store the state file. + STATE_DIR = "/etc/ansible-iptables" + + # Key used for unmanaged rules + UNMANAGED_RULES_KEY_NAME = "$unmanaged_rules$" + + # Only allow alphanumeric characters, underscore, hyphen, or a space for + # now. We don"t want to have problems while parsing comments using regular + # expressions. 
+ RULE_NAME_ALLOWED_CHARS = "a-zA-Z0-9_ -" + + module = None + + def __init__(self, module, ipversion): + # Create directory for json files. + if not os.path.exists(self.STATE_DIR): + os.makedirs(self.STATE_DIR) + if Iptables.module is None: + Iptables.module = module + self.state_save_path = self._get_state_save_path(ipversion) + self.system_save_path = self._get_system_save_path(ipversion) + self.state_dict = self._read_state_file() + self.bins = self._get_bins(ipversion) + self.iptables_names_file = self._get_iptables_names_file(ipversion) + # Check if we have a required iptables version. + self._check_compatibility() + # Save active iptables rules for all tables, so that we don"t + # need to fetch them every time using "iptables-save" command. + self._active_rules = {} + self._refresh_active_rules(table="*") + + def __eq__(self, other): + return ( + isinstance(other, self.__class__) + and compare_dictionaries(other.state_dict, self.state_dict) + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def _get_bins(self, ipversion): + if ipversion == "4": + return { + "iptables": Iptables.module.get_bin_path("iptables"), + "iptables-save": Iptables.module.get_bin_path("iptables-save"), + "iptables-restore": Iptables.module.get_bin_path("iptables-restore") + } + else: + return { + "iptables": Iptables.module.get_bin_path("ip6tables"), + "iptables-save": Iptables.module.get_bin_path("ip6tables-save"), + "iptables-restore": Iptables.module.get_bin_path("ip6tables-restore") + } + + def _get_iptables_names_file(self, ipversion): + if ipversion == "4": + return "/proc/net/ip_tables_names" + else: + return "/proc/net/ip6_tables_names" + + # Return a list of active iptables tables + def _get_list_of_active_tables(self): + if os.path.isfile(self.iptables_names_file): + table_names = open(self.iptables_names_file, "r").read() + return table_names.splitlines() + else: + return [] + + # If /etc/debian_version exist, this means this is a debian based OS (Ubuntu, 
Mint, etc...) + def _is_debian(self): + return os.path.isfile("/etc/debian_version") + + # Get the iptables system save path. + # Supports RHEL/CentOS "/etc/sysconfig/" location. + # Supports Debian/Ubuntu/Mint, "/etc/iptables/" location. + def _get_system_save_path(self, ipversion): + # distro detection, path setting should be added + if ipversion == "4": + if self._is_debian(): + return "/etc/iptables/rules.v4" + else: + return "/etc/sysconfig/iptables" + else: + if self._is_debian(): + return "/etc/iptables/rules.v6" + else: + return "/etc/sysconfig/ip6tables" + + # Return path to json state file. + def _get_state_save_path(self, ipversion): + if ipversion == "4": + return self.STATE_DIR + "/iptables.json" + else: + return self.STATE_DIR + "/ip6tables.json" + + # Checks if iptables is installed and if we have a correct version. + def _check_compatibility(self): + from distutils.version import StrictVersion + cmd = [self.bins["iptables"], "--version"] + rc, stdout, stderr = Iptables.module.run_command(cmd, check_rc=False) + if rc == 0: + result = re.search(r"^ip6tables\s+v(\d+\.\d+)\.\d+$", stdout) + if result: + version = result.group(1) + # CentOS 5 ip6tables (v1.3.x) doesn"t support comments, + # which means it cannot be used with this module. + if StrictVersion(version) < StrictVersion("1.4"): + Iptables.module.fail_json( + msg="This module isn't compatible with ip6tables versions older than 1.4.x" + ) + else: + Iptables.module.fail_json( + msg="Could not fetch iptables version! Is iptables installed?" + ) + + # Read rules from the json state file and return a dict. + def _read_state_file(self): + json_str = "{}" + if os.path.isfile(self.state_save_path): + json_str = open(self.state_save_path, "r").read() + read_dict = defaultdict(lambda: dict(dump="", rules_dict={}), json.loads(json_str)) + return read_dict + + # Checks if a table exists in the state_dict. + def _has_table(self, tbl): + return tbl in self.state_dict + + # Deletes table from the state_dict. 
+ def _delete_table(self, tbl): + if self._has_table(tbl): + del self.state_dict[tbl] + + # Acquires lock or exits after wait_for_seconds if it cannot be acquired. + def acquire_lock_or_exit(self, wait_for_seconds=10): + lock_file = self.STATE_DIR + "/.iptables.lock" + i = 0 + f = open(lock_file, "w+") + while i < wait_for_seconds: + try: + fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) + return + except IOError: + i += 1 + time.sleep(1) + Iptables.module.fail_json( + msg="Could not acquire lock to continue execution! " + "Probably another instance of this module is running." + ) + + # Check if a table has anything to flush (to check all tables pass table="*"). + def table_needs_flush(self, table): + needs_flush = False + if table == "*": + for tbl in Iptables.TABLES: + # If the table exists or if it needs to be flushed that means will make changes. + if self._has_table(tbl) or self._single_table_needs_flush(tbl): + needs_flush = True + break + # Only flush the specified table + else: + if self._has_table(table) or self._single_table_needs_flush(table): + needs_flush = True + return needs_flush + + # Check if a passed table needs to be flushed. + def _single_table_needs_flush(self, table): + needs_flush = False + active_rules = self._get_active_rules(table) + if active_rules: + policies = self._filter_default_chain_policies(active_rules, table) + chains = self._filter_custom_chains(active_rules, table) + rules = self._filter_rules(active_rules, table) + # Go over default policies and check if they are all ACCEPT. + for line in policies.splitlines(): + if not re.search(r"\bACCEPT\b", line): + needs_flush = True + break + # If there is at least one rule or custom chain, that means we need flush. + if len(chains) > 0 or len(rules) > 0: + needs_flush = True + return needs_flush + + # Returns a copy of the rules dict of a passed table. + def _get_table_rules_dict(self, table): + return self.state_dict[table]["rules_dict"].copy() + + # Returns saved table dump. 
+ def get_saved_table_dump(self, table): + return self.state_dict[table]["dump"] + + # Sets saved table dump. + def _set_saved_table_dump(self, table, dump): + self.state_dict[table]["dump"] = dump + + # Updates saved table dump from the active rules. + def refresh_saved_table_dump(self, table): + active_rules = self._get_active_rules(table) + self._set_saved_table_dump(table, active_rules) + + # Sets active rules of the passed table. + def _set_active_rules(self, table, rules): + self._active_rules[table] = rules + + # Return active rules of the passed table. + def _get_active_rules(self, table, clean=True): + active_rules = "" + if table == "*": + all_rules = [] + for tbl in Iptables.TABLES: + if tbl in self._active_rules: + all_rules.append(self._active_rules[tbl]) + active_rules = "\n".join(all_rules) + else: + active_rules = self._active_rules[table] + if clean: + return self._clean_save_dump(active_rules) + else: + return active_rules + + # Refresh active rules of a table ("*" for all tables). + def _refresh_active_rules(self, table): + if table == "*": + for tbl in Iptables.TABLES: + self._set_active_rules(tbl, self._get_system_active_rules(tbl)) + else: + self._set_active_rules(table, self._get_system_active_rules(table)) + + # Get iptables-save dump of active rules of one or all tables (pass "*") + # and return it as a string. 
+ def _get_system_active_rules(self, table): + active_tables = self._get_list_of_active_tables() + if table == "*": + cmd = [self.bins["iptables-save"]] + # If there are no active tables, that means there are no rules + if not active_tables: + return "" + else: + cmd = [self.bins["iptables-save"], "-t", table] + # If the table is not active, that means it has no rules + if table not in active_tables: + return "" + rc, stdout, stderr = Iptables.module.run_command(cmd, check_rc=True) + return stdout + + # Splits a rule into tokens + def _split_rule_into_tokens(self, rule): + try: + return shlex.split(rule, comments=True) + except Exception: + msg = "Could not parse the iptables rule:\n{}".format(rule) + Iptables.module.fail_json(msg=msg) + + # Removes comment lines and empty lines from rules. + @staticmethod + def clean_up_rules(rules): + cleaned_rules = [] + for line in rules.splitlines(): + # Remove lines with comments and empty lines. + if not (Iptables.is_comment(line) or Iptables.is_empty_line(line)): + cleaned_rules.append(line) + return "\n".join(cleaned_rules) + + # Checks if the line is a custom chain in specific iptables table. + @staticmethod + def is_custom_chain(line, table): + default_chains = Iptables.DEFAULT_CHAINS[table] + if re.match(r"\s*(:|(-N|--new-chain)\s+)[^\s]+", line) and not re.match( + r"\s*(:|(-N|--new-chain)\s+)\b(" + "|".join(default_chains) + r")\b", line + ): + return True + else: + return False + + # Checks if the line is a default chain of an iptables table. + @staticmethod + def is_default_chain(line, table): + default_chains = Iptables.DEFAULT_CHAINS[table] + if re.match( + r"\s*(:|(-P|--policy)\s+)\b(" + "|".join(default_chains) + r")\b\s+(ACCEPT|DROP)", line + ): + return True + else: + return False + + # Checks if a line is an iptables rule. + @staticmethod + def is_rule(line): + # We should only allow adding rules with "-A/--append", since others don"t make any sense. 
+ if re.match(r"\s*(-A|--append)\s+[^\s]+", line): + return True + else: + return False + + # Checks if a line starts with "#". + @staticmethod + def is_comment(line): + if re.match(r"\s*#", line): + return True + else: + return False + + # Checks if a line is empty. + @staticmethod + def is_empty_line(line): + if re.match(r"^$", line.strip()): + return True + else: + return False + + # Return name of custom chain from the rule. + def _get_custom_chain_name(self, line, table): + if Iptables.is_custom_chain(line, table): + return re.match(r"\s*(:|(-N|--new-chain)\s+)([^\s]+)", line).group(3) + else: + return "" + + # Return name of default chain from the rule. + def _get_default_chain_name(self, line, table): + if Iptables.is_default_chain(line, table): + return re.match(r"\s*(:|(-N|--new-chain)\s+)([^\s]+)", line).group(3) + else: + return "" + + # Return target of the default chain from the rule. + def _get_default_chain_target(self, line, table): + if Iptables.is_default_chain(line, table): + return re.match(r"\s*(:|(-N|--new-chain)\s+)([^\s]+)\s+([A-Z]+)", line).group(4) + else: + return "" + + # Removes duplicate custom chains from the table rules. + def _remove_duplicate_custom_chains(self, rules, table): + all_rules = [] + custom_chain_names = [] + for line in rules.splitlines(): + # Extract custom chains. + if Iptables.is_custom_chain(line, table): + chain_name = self._get_custom_chain_name(line, table) + if chain_name not in custom_chain_names: + custom_chain_names.append(chain_name) + all_rules.append(line) + else: + all_rules.append(line) + return "\n".join(all_rules) + + # Returns current iptables-save dump cleaned from comments and packet/byte counters. + def _clean_save_dump(self, simple_rules): + cleaned_dump = [] + for line in simple_rules.splitlines(): + # Ignore comments. + if Iptables.is_comment(line): + continue + # Reset counters for chains (begin with ":"), for easier comparing later on. 
+ if re.match(r"\s*:", line): + cleaned_dump.append(re.sub(r"\[([0-9]+):([0-9]+)\]", "[0:0]", line)) + else: + cleaned_dump.append(line) + cleaned_dump.append("\n") + return "\n".join(cleaned_dump) + + # Returns lines with default chain policies. + def _filter_default_chain_policies(self, rules, table): + chains = [] + for line in rules.splitlines(): + if Iptables.is_default_chain(line, table): + chains.append(line) + return "\n".join(chains) + + # Returns lines with iptables rules from an iptables-save table dump + # (removes chain policies, custom chains, comments and everything else). By + # default returns all rules, if "only_unmanged=True" returns rules which + # are not managed by Ansible. + def _filter_rules(self, rules, table, only_unmanaged=False): + filtered_rules = [] + for line in rules.splitlines(): + if Iptables.is_rule(line): + if only_unmanaged: + tokens = self._split_rule_into_tokens(line) + # We need to check if a rule has a comment which starts with "ansible[name]" + if "--comment" in tokens: + comment_index = tokens.index("--comment") + 1 + if comment_index < len(tokens): + # Fetch the comment + comment = tokens[comment_index] + # Skip the rule if the comment starts with "ansible[name]" + if not re.match( + r"ansible\[[" + Iptables.RULE_NAME_ALLOWED_CHARS + r"]+\]", comment + ): + filtered_rules.append(line) + else: + # Fail if there is no comment after the --comment parameter + msg = ( + "Iptables rule is missing a comment after " + "the '--comment' parameter:\n{}".format(line) + ) + Iptables.module.fail_json(msg=msg) + # If it doesn"t have comment, this means it is not managed by Ansible + # and we should append it. 
+ else: + filtered_rules.append(line) + else: + filtered_rules.append(line) + return "\n".join(filtered_rules) + + # Same as _filter_rules(), but returns custom chains + def _filter_custom_chains(self, rules, table, only_unmanaged=False): + filtered_chains = [] + # Get list of managed custom chains, which is needed to detect unmanaged custom chains + managed_custom_chains_list = self._get_custom_chains_list(table) + for line in rules.splitlines(): + if Iptables.is_custom_chain(line, table): + if only_unmanaged: + # The chain is not managed by this module if it"s not + # in the list of managed custom chains. + chain_name = self._get_custom_chain_name(line, table) + if chain_name not in managed_custom_chains_list: + filtered_chains.append(line) + else: + filtered_chains.append(line) + return "\n".join(filtered_chains) + + # Returns list of custom chains of a table. + def _get_custom_chains_list(self, table): + custom_chains_list = [] + for key, value in self._get_table_rules_dict(table).items(): + # Ignore UNMANAGED_RULES_KEY_NAME key, since we only want managed custom chains. + if key != Iptables.UNMANAGED_RULES_KEY_NAME: + for line in value["rules"].splitlines(): + if Iptables.is_custom_chain(line, table): + chain_name = self._get_custom_chain_name(line, table) + if chain_name not in custom_chains_list: + custom_chains_list.append(chain_name) + return custom_chains_list + + # Prepends "ansible[name]: " to iptables rule "--comment" argument, + # or adds "ansible[name]" as a comment if there is no comment. + def _prepend_ansible_comment(self, rules, name): + commented_lines = [] + for line in rules.splitlines(): + # Extract rules only since we cannot add comments to custom chains. + if Iptables.is_rule(line): + tokens = self._split_rule_into_tokens(line) + if "--comment" in tokens: + # If there is a comment parameter, we need to prepand "ansible[name]: ". 
+ comment_index = tokens.index("--comment") + 1 + if comment_index < len(tokens): + # We need to remove double quotes from comments, since there + # is an incompatiblity with older iptables versions + comment_text = tokens[comment_index].replace("'", "") + tokens[comment_index] = "ansible[" + name + "]: " + comment_text + else: + # Fail if there is no comment after the --comment parameter + msg = ( + "Iptables rule is missing a comment after " + "the '--comment' parameter:\n{}".format(line) + ) + Iptables.module.fail_json(msg=msg) + else: + # If comment doesn"t exist, we add a comment "ansible[name]" + tokens += ["-m", "comment", "--comment", "ansible[" + name + "]"] + # Escape and quote tokens in case they have spaces + tokens = [self._escape_and_quote_string(x) for x in tokens] + commented_lines.append(" ".join(tokens)) + # Otherwise it"s a chain, and we should just return it. + else: + commented_lines.append(line) + return "\n".join(commented_lines) + + # Double quote a string if it contains a space and escape double quotes. + def _escape_and_quote_string(self, s): + escaped = s.replace("'", r"\"") + if re.search(r"\s", escaped): + return "'" + escaped + "'" + else: + return escaped + + # Add table rule to the state_dict. + def add_table_rule(self, table, name, weight, rules, prepend_ansible_comment=True): + self._fail_on_bad_rules(rules, table) + if prepend_ansible_comment: + self.state_dict[table]["rules_dict"][name] = { + "weight": weight, + "rules": self._prepend_ansible_comment(rules, name) + } + else: + self.state_dict[table]["rules_dict"][name] = {"weight": weight, "rules": rules} + + # Remove table rule from the state_dict. + def remove_table_rule(self, table, name): + if name in self.state_dict[table]["rules_dict"]: + del self.state_dict[table]["rules_dict"][name] + + # TODO: Add sorting of rules so that diffs in check_mode look nicer and easier to follow. 
+ # Sorting would be done from top to bottom like this: + # * default chain policies + # * custom chains + # * rules + # + # Converts rules from a state_dict to an iptables-save readable format. + def get_table_rules(self, table): + generated_rules = "" + # We first add a header e.g. "*filter". + generated_rules += "*" + table + "\n" + rules_list = [] + custom_chains_list = [] + default_chain_policies = [] + dict_rules = self._get_table_rules_dict(table) + # Return list of rule names sorted by ("weight", "rules") tuple. + for rule_name in sorted( + dict_rules, key=lambda x: (dict_rules[x]["weight"], dict_rules[x]["rules"]) + ): + rules = dict_rules[rule_name]["rules"] + # Fail if some of the rules are bad + self._fail_on_bad_rules(rules, table) + rules_list.append(self._filter_rules(rules, table)) + custom_chains_list.append(self._filter_custom_chains(rules, table)) + default_chain_policies.append(self._filter_default_chain_policies(rules, table)) + # Clean up empty strings from these two lists. + rules_list = list(filter(None, rules_list)) + custom_chains_list = list(filter(None, custom_chains_list)) + default_chain_policies = list(filter(None, default_chain_policies)) + if default_chain_policies: + # Since iptables-restore applies the last chain policy it reads, we + # have to reverse the order of chain policies so that those with + # the lowest weight (higher priority) are read last. + generated_rules += "\n".join(reversed(default_chain_policies)) + "\n" + if custom_chains_list: + # We remove duplicate custom chains so that iptables-restore + # doesn"t fail because of that. + generated_rules += self._remove_duplicate_custom_chains( + "\n".join(sorted(custom_chains_list)), table + ) + "\n" + if rules_list: + generated_rules += "\n".join(rules_list) + "\n" + generated_rules += "COMMIT\n" + return generated_rules + + # Sets unmanaged rules for the passed table in the state_dict. 
+ def _set_unmanaged_rules(self, table, rules): + self.add_table_rule( + table, Iptables.UNMANAGED_RULES_KEY_NAME, 90, rules, prepend_ansible_comment=False + ) + + # Clears unmanaged rules of a table. + def clear_unmanaged_rules(self, table): + self._set_unmanaged_rules(table, "") + + # Updates unmanaged rules of a table from the active rules. + def refresh_unmanaged_rules(self, table): + # Get active iptables rules and clean them up. + active_rules = self._get_active_rules(table) + unmanaged_chains_and_rules = [] + unmanaged_chains_and_rules.append( + self._filter_custom_chains(active_rules, table, only_unmanaged=True) + ) + unmanaged_chains_and_rules.append( + self._filter_rules(active_rules, table, only_unmanaged=True) + ) + # Clean items which are empty strings + unmanaged_chains_and_rules = list(filter(None, unmanaged_chains_and_rules)) + self._set_unmanaged_rules(table, "\n".join(unmanaged_chains_and_rules)) + + # Check if there are bad lines in the specified rules. + def _fail_on_bad_rules(self, rules, table): + for line in rules.splitlines(): + tokens = self._split_rule_into_tokens(line) + if "-t" in tokens or "--table" in tokens: + msg = ( + "Iptables rules cannot contain '-t/--table' parameter. " + "You should use the 'table' parameter of the module to set rules " + "for a specific table." + ) + Iptables.module.fail_json(msg=msg) + # Fail if the parameter --comment doesn"t have a comment after + if "--comment" in tokens and len(tokens) <= tokens.index("--comment") + 1: + msg = ( + "Iptables rule is missing a comment after " + "the '--comment' parameter:\n{}".format(line) + ) + Iptables.module.fail_json(msg=msg) + if not ( + Iptables.is_rule(line) or Iptables.is_custom_chain(line, table) + or Iptables.is_default_chain(line, table) or Iptables.is_comment(line) + ): + msg = ( + "Bad iptables rule '{}'! 
You can only use -A/--append, -N/--new-chain " + "and -P/--policy to specify rules.".format(line) + ) + Iptables.module.fail_json(msg=msg) + + # Write rules to dest path. + def _write_rules_to_file(self, rules, dest): + tmp_path = self._write_to_temp_file(rules) + Iptables.module.atomic_move(tmp_path, dest) + + # Write text to a temp file and return path to that file. + def _write_to_temp_file(self, text): + fd, path = tempfile.mkstemp() + Iptables.module.add_cleanup_file(path) # add file for cleanup later + tmp = os.fdopen(fd, "w") + tmp.write(text) + tmp.close() + return path + + # + # Public and private methods which make changes on the system + # are named "system_*" and "_system_*", respectively. + # + + # Flush all rules in a passed table. + def _system_flush_single_table_rules(self, table): + # Set all default chain policies to ACCEPT. + for chain in Iptables.DEFAULT_CHAINS[table]: + cmd = [self.bins["iptables"], "-t", table, "-P", chain, "ACCEPT"] + Iptables.module.run_command(cmd, check_rc=True) + # Then flush all rules. + cmd = [self.bins["iptables"], "-t", table, "-F"] + Iptables.module.run_command(cmd, check_rc=True) + # And delete custom chains. + cmd = [self.bins["iptables"], "-t", table, "-X"] + Iptables.module.run_command(cmd, check_rc=True) + # Update active rules in the object. + self._refresh_active_rules(table) + + # Save active iptables rules to the system path. + def _system_save_active(self, backup=False): + # Backup if needed + if backup: + Iptables.module.backup_local(self.system_save_path) + # Get iptables-save dump of all tables + all_active_rules = self._get_active_rules(table="*", clean=False) + # Move iptables-save dump of all tables to the iptables_save_path + self._write_rules_to_file(all_active_rules, self.system_save_path) + + # Apply table dict rules to the system. 
+ def system_apply_table_rules(self, table, test=False): + dump_path = self._write_to_temp_file(self.get_table_rules(table)) + if test: + cmd = [self.bins["iptables-restore"], "-t", dump_path] + else: + cmd = [self.bins["iptables-restore"], dump_path] + rc, stdout, stderr = Iptables.module.run_command(cmd, check_rc=False) + if rc != 0: + if test: + dump_contents_file = open(dump_path, "r") + dump_contents = dump_contents_file.read() + dump_contents_file.close() + msg = "There is a problem with the iptables rules:" \ + + "\n\nError message:\n" \ + + stderr \ + + "\nGenerated rules:\n#######\n" \ + + dump_contents + "#####" + else: + msg = "Could not load iptables rules:\n\n" + stderr + Iptables.module.fail_json(msg=msg) + self._refresh_active_rules(table) + + # Flush one or all tables (to flush all tables pass table="*"). + def system_flush_table_rules(self, table): + if table == "*": + for tbl in Iptables.TABLES: + self._delete_table(tbl) + if self._single_table_needs_flush(tbl): + self._system_flush_single_table_rules(tbl) + # Only flush the specified table. + else: + self._delete_table(table) + if self._single_table_needs_flush(table): + self._system_flush_single_table_rules(table) + + # Saves state file and system iptables rules. 
+ def system_save(self, backup=False): + self._system_save_active(backup=backup) + rules = json.dumps(self.state_dict, sort_keys=True, indent=4, separators=(",", ": ")) + self._write_rules_to_file(rules, self.state_save_path) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + ipversion=dict(required=False, choices=["4", "6"], type="str", default="4"), + state=dict( + required=False, choices=["present", "absent"], default="present", type="str" + ), + weight=dict(required=False, type="int", default=40), + name=dict(required=True, type="str"), + table=dict( + required=False, choices=Iptables.TABLES + ["*"], default="filter", type="str" + ), + rules=dict(required=False, type="str", default=""), + backup=dict(required=False, type="bool", default=False), + keep_unmanaged=dict(required=False, type="bool", default=True), + ), + supports_check_mode=True, + ) + + check_mode = module.check_mode + changed = False + ipversion = module.params["ipversion"] + state = module.params["state"] + weight = module.params["weight"] + name = module.params["name"] + table = module.params["table"] + rules = module.params["rules"] + backup = module.params["backup"] + keep_unmanaged = module.params["keep_unmanaged"] + + kw = dict( + state=state, + name=name, + rules=rules, + weight=weight, + ipversion=ipversion, + table=table, + backup=backup, + keep_unmanaged=keep_unmanaged + ) + + iptables = Iptables(module, ipversion) + + # Acquire lock so that only one instance of this object can exist. + # Fail if the lock cannot be acquired within 10 seconds. + iptables.acquire_lock_or_exit(wait_for_seconds=10) + + # Clean up rules of comments and empty lines. 
+ rules = Iptables.clean_up_rules(rules) + + # Check additional parameter requirements + if state == "present" and name == "*": + module.fail_json(msg="Parameter 'name' can only be '*' if 'state=absent'") + if state == "present" and table == "*": + module.fail_json(msg="Parameter 'table' can only be '*' if 'name=*' and 'state=absent'") + if state == "present" and not name: + module.fail_json(msg="Parameter 'name' cannot be empty") + if state == "present" and not re.match("^[" + Iptables.RULE_NAME_ALLOWED_CHARS + "]+$", name): + module.fail_json( + msg="Parameter 'name' not valid! It can only contain alphanumeric characters, " + "underscore, hyphen, or a space, got: '{}'".format(name) + ) + if weight < 0 or weight > 99: + module.fail_json(msg="Parameter 'weight' can be 0-99, got: {}".format(weight)) + if state == "present" and rules == "": + module.fail_json(msg="Parameter 'rules' cannot be empty when 'state=present'") + + # Flush rules of one or all tables + if state == "absent" and name == "*": + # Check if table(s) need to be flushed + if iptables.table_needs_flush(table): + changed = True + if not check_mode: + # Flush table(s) + iptables.system_flush_table_rules(table) + # Save state and system iptables rules + iptables.system_save(backup=backup) + # Exit since there is nothing else to do + kw["changed"] = changed + module.exit_json(**kw) + + # Initialize new iptables object which will store new rules + iptables_new = Iptables(module, ipversion) + + if state == "present": + iptables_new.add_table_rule(table, name, weight, rules) + else: + iptables_new.remove_table_rule(table, name) + + if keep_unmanaged: + iptables_new.refresh_unmanaged_rules(table) + else: + iptables_new.clear_unmanaged_rules(table) + + # Refresh saved table dump with active iptables rules + iptables_new.refresh_saved_table_dump(table) + + # Check if there are changes in iptables, and if yes load new rules + if iptables != iptables_new: + + changed = True + + # Test generated rules + 
iptables_new.system_apply_table_rules(table, test=True) + + if check_mode: + # Create a predicted diff for check_mode. + # Diff will be created from rules generated from the state dictionary. + if hasattr(module, "_diff") and module._diff: + # Update unmanaged rules in the old object so the generated diff + # from the rules dictionaries is more accurate. + iptables.refresh_unmanaged_rules(table) + # Generate table rules from rules dictionaries. + table_rules_old = iptables.get_table_rules(table) + table_rules_new = iptables_new.get_table_rules(table) + # If rules generated from dicts are not equal, we generate a diff from them. + if table_rules_old != table_rules_new: + kw["diff"] = generate_diff(table_rules_old, table_rules_new) + else: + # TODO: Update this comment to be better. + kw["diff"] = { + "prepared": + "System rules were not changed (e.g. rule " + "weight changed, redundant rule, etc)" + } + else: + # We need to fetch active table dump before we apply new rules + # since we will need them to generate a diff. + table_active_rules = iptables_new.get_saved_table_dump(table) + + # Apply generated rules. + iptables_new.system_apply_table_rules(table) + + # Refresh saved table dump with active iptables rules. + iptables_new.refresh_saved_table_dump(table) + + # Save state and system iptables rules. + iptables_new.system_save(backup=backup) + + # Generate a diff. + if hasattr(module, "_diff") and module._diff: + table_active_rules_new = iptables_new.get_saved_table_dump(table) + if table_active_rules != table_active_rules_new: + kw["diff"] = generate_diff(table_active_rules, table_active_rules_new) + else: + # TODO: Update this comment to be better. + kw["diff"] = { + "prepared": + "System rules were not changed (e.g. 
rule " + "weight changed, redundant rule, etc)" + } + + kw["changed"] = changed + module.exit_json(**kw) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/openssl_pkcs12.py b/plugins/modules/openssl_pkcs12.py new file mode 100644 index 0000000..454182b --- /dev/null +++ b/plugins/modules/openssl_pkcs12.py @@ -0,0 +1,415 @@ +# -*- coding: utf-8 -*- +"""OpenSSL PKCS12 module.""" + +ANSIBLE_METADATA = {"metadata_version": "1.0", "status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = """ +--- +module: openssl_pkcs12 +author: "Guillaume Delpierre (@gdelpierre)" +version_added: "2.4" +short_description: Generate OpenSSL pkcs12 archive. +description: + - "This module allows one to (re-)generate PKCS#12." +requirements: + - "python-pyOpenSSL" +options: + ca_certificates: + required: False + description: + - List of CA certificate to include. + cert_path: + required: False + description: + - The path to read certificates and private keys from. + Must be in PEM format. + action: + required: False + default: "export" + choices: ["parse", "export"] + description: + - Create (export) or parse a PKCS#12. + src: + required: False + description: + - PKCS#12 file path to parse. + path: + required: True + default: null + description: + - Filename to write the PKCS#12 file to. + force: + required: False + default: False + description: + - Should the file be regenerated even it it already exists. + friendly_name: + required: False + default: null + aliases: "name" + description: + - Specifies the friendly name for the certificate and private key. + iter_size: + required: False + default: 2048 + description: + - Number of times to repeat the encryption step. + maciter_size: + required: False + default: 1 + description: + - Number of times to repeat the MAC step. + mode: + required: False + default: 0400 + description: + - Default mode for the generated PKCS#12 file. 
+ passphrase: + required: False + default: null + description: + - The PKCS#12 password. + privatekey_path: + required: False + description: + - File to read private key from. + privatekey_passphrase: + required: False + default: null + description: + - Passphrase source to decrypt any input private keys with. + state: + required: False + default: "present" + choices: ["present", "absent"] + description: + - Whether the file should exist or not. +""" + +EXAMPLES = """ +- name: "Generate PKCS#12 file" + openssl_pkcs12: + path: "/opt/certs/ansible.p12" + friendly_name: "raclette" + privatekey_path: "/opt/certs/keys/key.pem" + cert_path: "/opt/certs/cert.pem" + ca_certificates: "/opt/certs/ca.pem" + state: present + +- name: "Change PKCS#12 file permission" + openssl_pkcs12: + path: "/opt/certs/ansible.p12" + friendly_name: "raclette" + privatekey_path: "/opt/certs/keys/key.pem" + cert_path: "/opt/certs/cert.pem" + ca_certificates: "/opt/certs/ca.pem" + state: present + mode: 0600 + +- name: "Regen PKCS#12 file" + openssl_pkcs12: + path: "/opt/certs/ansible.p12" + friendly_name: "raclette" + privatekey_path: "/opt/certs/keys/key.pem" + cert_path: "/opt/certs/cert.pem" + ca_certificates: "/opt/certs/ca.pem" + state: present + mode: 0600 + force: True + +- name: "Dump/Parse PKCS#12 file" + openssl_pkcs12: + src: "/opt/certs/ansible.p12" + path: "/opt/certs/ansible.pem" + state: present + +- name: "Remove PKCS#12 file" + openssl_pkcs12: + path: "/opt/certs/ansible.p12" + state: absent +""" + +RETURN = """ +filename: + description: Path to the generate PKCS#12 file. 
+ returned: changed or success + type: string + sample: /opt/certs/ansible.p12 +""" + +import errno +import os + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule + +try: + from OpenSSL import crypto +except ImportError: + pyopenssl_found = False +else: + pyopenssl_found = True + + +class PkcsError(Exception): + pass + + +class Pkcs(object): + + def __init__(self, module): + self.path = module.params["path"] + self.force = module.params["force"] + self.state = module.params["state"] + self.action = module.params["action"] + self.check_mode = module.check_mode + self.iter_size = module.params["iter_size"] + self.maciter_size = module.params["maciter_size"] + self.pkcs12 = None + self.src = module.params["src"] + self.privatekey_path = module.params["privatekey_path"] + self.privatekey_passphrase = module.params["privatekey_passphrase"] + self.cert_path = module.params["cert_path"] + self.ca_certificates = module.params["ca_certificates"] + self.friendly_name = module.params["friendly_name"] + self.passphrase = module.params["passphrase"] + self.mode = module.params["mode"] + self.changed = False + if not self.mode: + self.mode = int("0400", 8) + + def load_privatekey(self, path, passphrase=None): + """Load the specified OpenSSL private key.""" + try: + if passphrase: + privatekey = crypto.load_privatekey( + crypto.FILETYPE_PEM, + open(path, "rb").read(), passphrase + ) + else: + privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, "rb").read()) + + return privatekey + except (IOError, OSError) as exc: + raise PkcsError(exc) + + def load_certificate(self, path): + """Load the specified certificate.""" + try: + cert_content = open(path, "rb").read() + cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_content) + return cert + except (IOError, OSError) as exc: + raise PkcsError(exc) + + def load_pkcs12(self, path, passphrase=None): + """Load pkcs12 file.""" + try: + if passphrase: + return 
crypto.load_pkcs12(open(path, "rb").read(), passphrase) + else: + return crypto.load_pkcs12(open(path, "rb").read()) + except (IOError, OSError) as exc: + raise PkcsError(exc) + + def dump_privatekey(self, path): + """Dump the specified OpenSSL private key.""" + try: + return crypto.dump_privatekey( + crypto.FILETYPE_PEM, + self.load_pkcs12(path).get_privatekey() + ) + except (IOError, OSError) as exc: + raise PkcsError(exc) + + def dump_certificate(self, path): + """Dump the specified certificate.""" + try: + return crypto.dump_certificate( + crypto.FILETYPE_PEM, + self.load_pkcs12(path).get_certificate() + ) + except (IOError, OSError) as exc: + raise PkcsError(exc) + + def generate(self, module): + """Generate PKCS#12 file archive.""" + if not os.path.exists(self.path) or self.force: + self.pkcs12 = crypto.PKCS12() + + try: + self.remove() + except PkcsError as exc: + module.fail_json(msg=to_native(exc)) + + if self.ca_certificates: + ca_certs = [self.load_certificate(ca_cert) for ca_cert in self.ca_certificates] + self.pkcs12.set_ca_certificates(ca_certs) + + if self.cert_path: + self.pkcs12.set_certificate(self.load_certificate(self.cert_path)) + + if self.friendly_name: + self.pkcs12.set_friendlyname(self.friendly_name) + + if self.privatekey_path: + self.pkcs12.set_privatekey( + self.load_privatekey(self.privatekey_path, self.privatekey_passphrase) + ) + + try: + with open(self.path, "wb", self.mode) as archive: + archive.write( + self.pkcs12.export(self.passphrase, self.iter_size, self.maciter_size) + ) + module.set_mode_if_different(self.path, self.mode, False) + self.changed = True + except (IOError, OSError) as exc: + self.remove() + raise PkcsError(exc) + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + module.set_mode_if_different(self.path, self.mode, False) + self.changed = True + + def parse(self, module): + """Read PKCS#12 file.""" + if not os.path.exists(self.path) or 
self.force: + try: + self.remove() + + with open(self.path, "wb") as content: + content.write( + "{0}{1}".format( + self.dump_privatekey(self.src), self.dump_certificate(self.src) + ) + ) + module.set_mode_if_different(self.path, self.mode, False) + self.changed = True + except IOError as exc: + raise PkcsError(exc) + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + module.set_mode_if_different(self.path, self.mode, False) + self.changed = True + + def remove(self): + """Remove the PKCS#12 file archive from the filesystem.""" + try: + os.remove(self.path) + self.changed = True + except OSError as exc: + if exc.errno != errno.ENOENT: + raise PkcsError(exc) + else: + pass + + def check(self, module, perms_required=True): + + def _check_pkey_passphrase(): + if self.privatekey_passphrase: + try: + self.load_privatekey(self.path, self.privatekey_passphrase) + return True + except crypto.Error: + return False + return True + + if not os.path.exists(self.path): + return os.path.exists(self.path) + + return _check_pkey_passphrase + + def dump(self): + """Serialize the object into a dictionary.""" + result = { + "changed": self.changed, + "filename": self.path, + } + + if self.privatekey_path: + result["privatekey_path"] = self.privatekey_path + + return result + + +def main(): + argument_spec = dict( + action=dict(default="export", choices=["parse", "export"], type="str"), + ca_certificates=dict(type="list"), + cert_path=dict(type="path"), + force=dict(default=False, type="bool"), + friendly_name=dict(type="str", aliases=["name"]), + iter_size=dict(default=2048, type="int"), + maciter_size=dict(default=1, type="int"), + passphrase=dict(type="str", no_log=True), + path=dict(required=True, type="path"), + privatekey_path=dict(type="path"), + privatekey_passphrase=dict(type="str", no_log=True), + state=dict(default="present", choices=["present", "absent"], type="str"), + src=dict(type="path"), + ) + 
+ required_if = [ + ["action", "export", ["friendly_name"]], + ["action", "parse", ["src"]], + ] + + required_together = [ + ["privatekey_path", "friendly_name"], + ] + + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + required_if=required_if, + required_together=required_together, + supports_check_mode=True, + ) + + if not pyopenssl_found: + module.fail_json(msg="The python pyOpenSSL library is required") + + base_dir = os.path.dirname(module.params["path"]) + if not os.path.isdir(base_dir): + module.fail_json( + name=base_dir, + msg="The directory {0} does not exist or " + "the file is not a directory".format(base_dir) + ) + + pkcs12 = Pkcs(module) + + if module.params["state"] == "present": + if module.check_mode: + result = pkcs12.dump() + result["changed"] = module.params["force"] or not pkcs12.check(module) + module.exit_json(**result) + + try: + if module.params["action"] == "export": + pkcs12.generate(module) + else: + pkcs12.parse(module) + except PkcsError as exc: + module.fail_json(msg=to_native(exc)) + else: + if module.check_mode: + result = pkcs12.dump() + result["changed"] = os.path.exists(module.params["path"]) + module.exit_json(**result) + + try: + pkcs12.remove() + except PkcsError as exc: + module.fail_json(msg=to_native(exc)) + + result = pkcs12.dump() + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py new file mode 100644 index 0000000..92c6098 --- /dev/null +++ b/plugins/modules/proxmox_kvm.py @@ -0,0 +1,1335 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2016, Abdoul Bah (@helldorado) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Module to control Proxmox KVM machines.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import time +import traceback + +from 
ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule + +__metaclass__ = type +ANSIBLE_METADATA = {"metadata_version": "1.1", "status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = """ +--- +module: proxmox_kvm +short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. +description: + - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. +version_added: "2.3" +author: "Abdoul Bah (@helldorado) , Thijs Cramer " +options: + acpi: + description: + - Specify if ACPI should be enabled/disabled. + type: bool + default: "yes" + agent: + description: + - Specify if the QEMU Guest Agent should be enabled/disabled. + type: bool + args: + description: + - Pass arbitrary arguments to kvm. + - This option is for experts only! + default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait" + api_host: + description: + - Specify the target host of the Proxmox VE cluster. + required: true + api_user: + description: + - Specify the user to authenticate with. + required: true + api_password: + description: + - Specify the password to authenticate with. + - You can use C(PROXMOX_PASSWORD) environment variable. + autostart: + description: + - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). + type: bool + default: "no" + balloon: + description: + - Specify the amount of RAM for the VM in MB. + - Using zero disables the balloon driver. + default: 0 + bios: + description: + - Specify the BIOS implementation. + choices: ["seabios", "ovmf"] + boot: + description: + - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). + - You can combine to set order. + default: cnd + bootdisk: + description: + - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) + ciuser: + version_added: 2.6 + description: + - Set username used in Cloud-Init Config. 
+ cipassword: + version_added: 2.6 + description: + - Set password used in Cloud-Init Config (NOT RECOMMENDED, use sshkeys instead). + citype: + version_added: 2.6 + description: + - Specifies the cloud-init configuration format. + choices: ["nocloud", "configdrive2"] + clone: + description: + - Name of VM to be cloned. If C(vmid) is setted, C(clone) can take arbitrary value but required for intiating the clone. + cores: + description: + - Specify number of cores per socket. + default: 1 + cpu: + description: + - Specify emulated CPU type. + default: kvm64 + cpulimit: + description: + - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. + - If the computer has 2 CPUs, it has total of "2" CPU time + cpuunits: + description: + - Specify CPU weight for a VM. + - You can disable fair-scheduler configuration by setting this to 0 + default: 1000 + delete: + description: + - Specify a list of settings you want to delete. + description: + description: + - Specify the description for the VM. Only used on the configuration web interface. + - This is saved as comment inside the configuration file. + digest: + description: + - Specify if to prevent changes if current configuration file has different SHA1 digest. + - This can be used to prevent concurrent modifications. + force: + description: + - Allow to force stop VM. + - Can be used only with states C(stopped), C(restarted). + type: bool + format: + description: + - Target drive"s backing file"s data format. + - Used only with clone + default: qcow2 + choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk" ] + freeze: + description: + - Specify if PVE should freeze CPU at startup (use "c" monitor command to start execution). + type: bool + full: + description: + - Create a full copy of all disk. This is always done when you clone a normal VM. + - For VM templates, we try to create a linked clone by default. 
+ - Used only with clone + type: bool + default: "yes" + hostpci: + description: + - Specify a hash/dictionary of map host pci devices into guest. C(hostpci="{"key":"value", "key":"value"}"). + - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). + - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). + - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). + - C(rombar=boolean) I(default=1) Specify whether or not the device"s ROM will be visible in the guest"s memory map. + - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. + - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. + hotplug: + description: + - Selectively enable hotplug features. + - This is a comma separated list of hotplug features C("network", "disk", "cpu", "memory" and "usb"). + - Value 0 disables hotplug completely and value 1 is an alias for the default C("network,disk,usb"). + hugepages: + description: + - Enable/disable hugepages memory. + choices: ["any", "2", "1024"] + ide: + description: + - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide="{"key":"value", "key":"value"}"). + - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive"s backing file"s data format. C(qcow2|raw|subvol). + ipconfig: + version_added: 2.6 + description: + - A hash/dictionary of ip"s used in the Cloud-Init Configuration. + - Keys allowed are - C(ipconfig[n]). + - "Values allowed are - C("gw=IP/CIDR,gw6=IP6/CIDR,ip=IP/CIDR,ip6=IP/CIDR")." + - C(gw) is the IPv4 Default Gateway. + - C(gw6) is the IPv6 Default Gateway. 
+ - C(ip) is the IPv4 IP Address. + - C(ip6) is the IPv6 IP Address. + keyboard: + description: + - Sets the keyboard layout for VNC server. + kvm: + description: + - Enable/disable KVM hardware virtualization. + type: bool + default: "yes" + localtime: + description: + - Sets the real time clock to local time. + - This is enabled by default if ostype indicates a Microsoft OS. + type: bool + lock: + description: + - Lock/unlock the VM. + choices: ["migrate", "backup", "snapshot", "rollback"] + machine: + description: + - Specifies the Qemu machine type. + - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) + memory: + description: + - Memory size in MB for instance. + default: 512 + migrate_downtime: + description: + - Sets maximum tolerated downtime (in seconds) for migrations. + migrate_speed: + description: + - Sets maximum speed (in MB/s) for migrations. + - A value of 0 is no limit. + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for C(state=present). + nameserver: + version_added: 2.6 + description: + - Specifies the DNS Nameserver used by Cloud-Init Config. + net: + description: + - A hash/dictionary of network interfaces for the VM. C(net="{"key":"value", "key":"value"}"). + - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",brigde="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). + - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). + - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. + - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called "vmbr0". + - Option C(rate) is used to limit traffic bandwidth from and to this interface. 
It is specified as floating point number, unit is "Megabytes per second". + - If you specify no bridge, we create a kvm "user" (NATed) network device, which provides DHCP and DNS services. + newid: + description: + - VMID for the clone. Used only with clone. + - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. + node: + description: + - Proxmox VE node, where the new VM will be created. + - Only required for C(state=present). + - For other states, it will be autodiscovered. + numa: + description: + - A hash/dictionaries of NUMA topology. C(numa="{"key":"value", "key":"value"}"). + - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). + - C(cpus) CPUs accessing this NUMA node. + - C(hostnodes) Host NUMA nodes to use. + - C(memory) Amount of memory this NUMA node provides. + - C(policy) NUMA allocation policy. + onboot: + description: + - Specifies whether a VM will be started during system bootup. + type: bool + default: "yes" + ostype: + description: + - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. + - The l26 is Linux 2.6/3.X Kernel. + choices: ["other", "wxp", "w2k", "w2k3", "w2k8", "wvista", "win7", "win8", "l24", "l26", "solaris"] + default: l26 + parallel: + description: + - A hash/dictionary of map host parallel devices. C(parallel="{"key":"value", "key":"value"}"). + - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. + - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). + pool: + description: + - Add the new VM to the specified pool. + protection: + description: + - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. + type: bool + reboot: + description: + - Allow reboot. If set to C(yes), the VM exit on reboot. + type: bool + revert: + description: + - Revert a pending change. 
+ sata: + description: + - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata="{"key":"value", "key":"value"}"). + - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive"s backing file"s data format. C(qcow2|raw|subvol). + scsi: + description: + - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi="{"key":"value", "key":"value"}"). + - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 13. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive"s backing file"s data format. C(qcow2|raw|subvol). + scsihw: + description: + - Specifies the SCSI controller model. + choices: ["lsi", "lsi53c810", "virtio-scsi-pci", "virtio-scsi-single", "megasas", "pvscsi"] + searchdomain: + version_added: 2.6 + description: + - The DNS Search Domain used by Cloud-Init Config. + serial: + description: + - A hash/dictionary of serial device to create inside the VM. C("{"key":"value", "key":"value"}"). + - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3. + - Values allowed are - C((/dev/.+|socket)). + - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care. + shares: + description: + - Rets amount of memory shares for auto-ballooning. (0 - 50000). + - The larger the number is, the more memory this VM gets. + - The number is relative to weights of all other running VMs. + - Using 0 disables auto-ballooning, this means no limit. + skiplock: + description: + - Ignore locks + - Only root is allowed to use this option. + smbios: + description: + - Specifies SMBIOS type 1 fields. + snapname: + description: + - The name of the snapshot. Used only with clone. 
+ sockets: + description: + - Sets the number of CPU sockets. (1 - N). + default: 1 + sshkeys: + version_added: 2.6 + description: + - The SSH Keys used by Cloud-Init Config (OpenSSH Format). + startdate: + description: + - Sets the initial date of the real time clock. + - Valid format for date are C("now") or C("2016-09-25T16:01:21") or C("2016-09-25"). + startup: + description: + - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]). + - Order is a non-negative number defining the general startup order. + - Shutdown in done with reverse ordering. + state: + description: + - Indicates desired state of the instance. + - If C(current), the current state of the VM will be fecthed. You can access it with C(results.status) + choices: ["present", "started", "absent", "stopped", "restarted","current"] + default: present + storage: + description: + - Target storage for full clone. + tablet: + description: + - Enables/disables the USB tablet device. + type: bool + default: "no" + target: + description: + - Target node. Only allowed if the original VM is on shared storage. + - Used only with clone + tdf: + description: + - Enables/disables time drift fix. + type: bool + template: + description: + - Enables/disables the template. + type: bool + default: "no" + timeout: + description: + - Timeout for operations. + default: 30 + update: + description: + - If C(yes), the VM will be update with new value. + - Cause of the operations of the API and security reasons, I have disabled the update of the following parameters + - C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk... + type: bool + default: "no" + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. + type: bool + default: "no" + vcpus: + description: + - Sets number of hotplugged vcpus. 
+ vga: + description: + - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option "std" or "vmware". + choices: ["std", "cirrus", "vmware", "qxl", "serial0", "serial1", "serial2", "serial3", "qxl2", "qxl3", "qxl4"] + default: std + virtio: + description: + - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio="{"key":"value", "key":"value"}"). + - Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive"s backing file"s data format. C(qcow2|raw|subvol). + vmid: + description: + - Specifies the VM ID. Instead use I(name) parameter. + - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI. + watchdog: + description: + - Creates a virtual hardware watchdog device. +requirements: [ "proxmoxer", "requests" ] +""" # noqa + +EXAMPLES = """ +# Create new VM with minimal options +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + +# Create new VM with minimal options and given vmid +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + vmid : 100 + +# Create new VM with two network interface options. +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + net : "{"net0":"virtio,bridge=vmbr1,rate=200", "net1":"e1000,bridge=vmbr2,"}" + +# Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus. 
+- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + net : "{"net0":"virtio,bridge=vmbr1,rate=200"}" + virtio : "{"virtio0":"VMs_LVM:10", "virtio1":"VMs:2,format=qcow2", "virtio2":"VMs:5,format=raw"}" + cores : 4 + vcpus : 2 + +# Clone VM with only source VM name +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + clone : spynal # The VM source + name : zavala # The target VM name + node : sabrewulf + storage : VMs + format : qcow2 + timeout : 500 # Note: The task can take a while. Adapt + +# Clone VM with source vmid and target newid and raw format +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + clone : arbitrary_name + vmid : 108 + newid : 152 + name : zavala # The target VM name + node : sabrewulf + storage : LVM_STO + format : raw + timeout : 300 # Note: The task can take a while. Adapt + +# Create new VM and lock it for snapashot. +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + lock : snapshot + +# Create new VM and set protection to disable the remove VM and remove disk operations +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + protection : yes + +# Start VM +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : started + +# Stop VM +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : stopped + +# Stop VM with force +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : stopped + force : yes + +# Restart VM +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : restarted + +# 
Remove VM +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : absent + +# Get VM current state +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + state : current + +# Update VM configuration +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + cpu : 8 + memory : 16384 + update : yes + +# Delete QEMU parameters +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + delete : "args,template,cpulimit" + +# Revert a pending change +- proxmox_kvm: + api_user : root@pam + api_password: secret + api_host : helldorado + name : spynal + node : sabrewulf + revert : "template,cpulimit" +""" # noqa + +RETURN = """ +devices: + description: The list of devices created or used. + returned: success + type: dict + sample: " + { + "ide0": "VMS_LVM:vm-115-disk-1", + "ide1": "VMs:115/vm-115-disk-3.raw", + "virtio0": "VMS_LVM:vm-115-disk-2", + "virtio1": "VMs:115/vm-115-disk-1.qcow2", + "virtio2": "VMs:115/vm-115-disk-2.raw" + }" +mac: + description: List of mac address created and net[n] attached. Useful when you want to use provision systems like Foreman via PXE. + returned: success + type: dict + sample: " + { + "net0": "3E:6E:97:D2:31:9F", + "net1": "B6:A1:FC:EF:78:A4" + }" +vmid: + description: The VM vmid. + returned: success + type: int + sample: 115 +status: + description: + - The current virtual machine status. 
+ - Returned only when C(state=current) + returned: success + type: dict + sample: "{ + "changed": false, + "msg": "VM kropta with vmid = 110 is running", + "status": "running" + }" +""" # noqa + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +VZ_TYPE = "qemu" + + +def get_nextvmid(module, proxmox): + try: + vmid = proxmox.cluster.nextid.get() + return vmid + except Exception as e: + module.fail_json( + msg="Unable to get next vmid. Failed with exception: {}".format(to_native(e)), + exception=traceback.format_exc() + ) + + +def get_vmid(proxmox, name): + return [vm["vmid"] for vm in proxmox.cluster.resources.get(type="vm") if vm["name"] == name] + + +def get_vm(proxmox, vmid): + return [vm for vm in proxmox.cluster.resources.get(type="vm") if vm["vmid"] == int(vmid)] + + +def node_check(proxmox, node): + return [True for nd in proxmox.nodes.get() if nd["node"] == node] + + +def get_vminfo(module, proxmox, node, vmid, **kwargs): + global results # noqa + results = {} + mac = {} + devices = {} + try: + vm = proxmox.nodes(node).qemu(vmid).config.get() + except Exception as e: + module.fail_json( + msg="Getting information for VM with vmid={0} failed with exception: {1}". + format(vmid, e) + ) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + # Convert all dict in kwargs to elements. 
+ # For hostpci[n], ide[n], ipconfig[n], net[n], numa[n], + # parallel[n], sata[n], scsi[n], serial[n], virtio[n] ## noqa + for k in kwargs.keys(): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Split information by type + for k, v in kwargs.items(): + if re.match(r"net[0-9]", k) is not None: + interface = k + k = vm[k] + k = re.search("=(.*?),", k).group(1) + mac[interface] = k + if ( + re.match(r"virtio[0-9]", k) is not None or re.match(r"ide[0-9]", k) is not None + or re.match(r"ipconfig[0-9]", k) is not None or re.match(r"scsi[0-9]", k) is not None + or re.match(r"sata[0-9]", k) is not None + ): + device = k + k = vm[k] + k = re.search("(.*?),", k).group(1) + devices[device] = k + + results["mac"] = mac + results["devices"] = devices + results["vmid"] = int(vmid) + + +def settings(module, proxmox, vmid, node, name, timeout, **kwargs): + proxmox_node = proxmox.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(**kwargs) is None: + return True + else: + return False + + +def create_vm( + module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, + **kwargs +): + # Available only in PVE 4 + only_v4 = ["force", "protection", "skiplock"] + + # valide clone parameters + valid_clone_params = ["format", "full", "pool", "snapname", "storage", "target"] + clone_params = {} + # Default args for vm. Note: -args option is for experts only. + # It allows you to pass arbitrary arguments to kvm. + vm_args = "-serial unix:/var/run/qemu-server/{}.serial,server,nowait".format(vmid) + + proxmox_node = proxmox.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. 
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) + + # Verify Cloud-Init support + if PVE_FULL_VERSION < 5.2: # noqa + if ( + "ciuser" in kwargs or "cipassword" in kwargs or "citype" in kwargs + or "ipconfig" in kwargs or "nameserver" in kwargs or "searchdomain" in kwargs + or "sshkeys" in kwargs + ): + module.fail_json( + msg="Cloud-Init is not supported on Proxmox Versions" + " older than 5.2, your version: {}".format(PVE_FULL_VERSION) # noqa + ) + + # The features work only on PVE 4 + if PVE_MAJOR_VERSION < 4: # noqa + for p in only_v4: + if p in kwargs: + del kwargs[p] + + # If update, don"t update disk (virtio, ide, sata, scsi) and network interface + if update: + if "virtio" in kwargs: + del kwargs["virtio"] + if "sata" in kwargs: + del kwargs["sata"] + if "scsi" in kwargs: + del kwargs["scsi"] + if "ide" in kwargs: + del kwargs["ide"] + + # Convert all dict in kwargs to elements. + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], + # sata[n], scsi[n], serial[n], virtio[n] ## noqa + for k in kwargs.keys(): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Rename numa_enabled to numa. According the API documentation + if "numa_enabled" in kwargs: + kwargs["numa"] = kwargs["numa_enabled"] + del kwargs["numa_enabled"] + + # -args and skiplock require root@pam user + if module.params["api_user"] == "root@pam" and module.params["args"] is None: + if not update: + kwargs["args"] = vm_args + elif module.params["api_user"] == "root@pam" and module.params["args"] is not None: + kwargs["args"] = module.params["args"] + elif module.params["api_user"] != "root@pam" and module.params["args"] is not None: + module.fail_json(msg="args parameter require root@pam user. ") + + if module.params["api_user"] != "root@pam" and module.params["skiplock"] is not None: + module.fail_json(msg="skiplock parameter require root@pam user. 
") + + if update: + if getattr(proxmox_node, VZ_TYPE)(vmid).config.set( + name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs + ) is None: + return True + else: + return False + elif module.params["clone"] is not None: + for param in valid_clone_params: + if module.params[param] is not None: + clone_params[param] = module.params[param] + clone_params.update( + dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)) + ) + taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) + else: + taskid = getattr(proxmox_node, VZ_TYPE).create( + vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs + ) + + while timeout: + if ( + proxmox_node.tasks(taskid).status.get()["status"] == "stopped" + and proxmox_node.tasks(taskid).status.get()["exitstatus"] == "OK" + ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json( + msg="Reached timeout while waiting for creating VM." + " Last line in task before timeout: {}". + format(proxmox_node.tasks(taskid).log.get()[:1]) + ) + time.sleep(1) + return False + + +def start_vm(module, proxmox, vm, vmid, timeout): + taskid = getattr(proxmox.nodes(vm[0]["node"]), VZ_TYPE)(vmid).status.start.post() + while timeout: + if ( + proxmox.nodes(vm[0]["node"]).tasks(taskid).status.get()["status"] == "stopped" + and proxmox.nodes(vm[0]["node"]).tasks(taskid).status.get()["exitstatus"] == "OK" + ): + return True + timeout -= 1 + if timeout == 0: + message = ( + "Reached timeout while waiting for starting VM." 
+ " Last line in task before timeout: {}".format( + proxmox.nodes(vm[0]["node"]).tasks(taskid).log.get()[:1] + ) + ) + module.fail_json(msg=message) + + time.sleep(1) + return False + + +def stop_vm(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = getattr(proxmox.nodes(vm[0]["node"]), + VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) + else: + taskid = getattr(proxmox.nodes(vm[0]["node"]), VZ_TYPE)(vmid).status.shutdown.post() + while timeout: + if ( + proxmox.nodes(vm[0]["node"]).tasks(taskid).status.get()["status"] == "stopped" + and proxmox.nodes(vm[0]["node"]).tasks(taskid).status.get()["exitstatus"] == "OK" + ): + return True + timeout -= 1 + if timeout == 0: + message = ( + "Reached timeout while waiting for stopping VM." + " Last line in task before timeout: {}".format( + proxmox.nodes(vm[0]["node"]).tasks(taskid).log.get()[:1] + ) + ) + module.fail_json(msg=message) + time.sleep(1) + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + acpi=dict(type="bool", default="yes"), + agent=dict(type="bool"), + args=dict(type="str", default=None), + api_host=dict(required=True), + api_user=dict(required=True), + api_password=dict(no_log=True), + autostart=dict(type="bool", default="no"), + balloon=dict(type="int", default=0), + bios=dict(choices=["seabios", "ovmf"]), + boot=dict(type="str", default="cnd"), + bootdisk=dict(type="str"), + ciuser=dict(type="str", default="root"), + cipassword=dict(type="str"), + citype=dict(type="str", default=None, choices=["nocloud", "configdrive2"]), + clone=dict(type="str", default=None), + cores=dict(type="int", default=1), + cpu=dict(type="str", default="kvm64"), + cpulimit=dict(type="int"), + cpuunits=dict(type="int", default=1000), + delete=dict(type="str", default=None), + description=dict(type="str"), + digest=dict(type="str"), + force=dict(type="bool", default=None), + format=dict( + type="str", + default="qcow2", + choices=["cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk"] 
+ ), + freeze=dict(type="bool"), + full=dict(type="bool", default="yes"), + hostpci=dict(type="dict"), + hotplug=dict(type="str"), + hugepages=dict(choices=["any", "2", "1024"]), + ide=dict(type="dict", default=None), + ipconfig=dict(type="dict", default=None), + keyboard=dict(type="str"), + kvm=dict(type="bool", default="yes"), + localtime=dict(type="bool"), + lock=dict(choices=["migrate", "backup", "snapshot", "rollback"]), + machine=dict(type="str"), + memory=dict(type="int", default=512), + migrate_downtime=dict(type="int"), + migrate_speed=dict(type="int"), + name=dict(type="str"), + nameserver=dict(type="str"), + net=dict(type="dict"), + newid=dict(type="int", default=None), + node=dict(), + numa=dict(type="dict"), + numa_enabled=dict(type="bool"), + onboot=dict(type="bool", default="yes"), + ostype=dict( + default="l26", + choices=[ + "other", "wxp", "w2k", "w2k3", "w2k8", "wvista", "win7", "win8", "l24", "l26", + "solaris" + ] + ), + parallel=dict(type="dict"), + pool=dict(type="str"), + protection=dict(type="bool"), + reboot=dict(type="bool"), + revert=dict(type="str", default=None), + sata=dict(type="dict"), + scsi=dict(type="dict"), + scsihw=dict( + choices=[ + "lsi", "lsi53c810", "virtio-scsi-pci", "virtio-scsi-single", "megasas", + "pvscsi" + ] + ), + searchdomain=dict(type="str"), + serial=dict(type="dict"), + shares=dict(type="int"), + skiplock=dict(type="bool"), + smbios=dict(type="str"), + snapname=dict(type="str"), + sockets=dict(type="int", default=1), + sshkeys=dict(type="str"), + startdate=dict(type="str"), + startup=dict(), + state=dict( + default="present", + choices=["present", "absent", "stopped", "started", "restarted", "current"] + ), + storage=dict(type="str"), + tablet=dict(type="bool", default="no"), + target=dict(type="str"), + tdf=dict(type="bool"), + template=dict(type="bool", default="no"), + timeout=dict(type="int", default=30), + update=dict(type="bool", default="no"), + validate_certs=dict(type="bool", default="no"), + 
vcpus=dict(type="int", default=None), + vga=dict( + default="std", + choices=[ + "std", "cirrus", "vmware", "qxl", "serial0", "serial1", "serial2", "serial3", + "qxl2", "qxl3", "qxl4" + ] + ), + virtio=dict(type="dict", default=None), + vmid=dict(type="int", default=None), + watchdog=dict(), + ), + mutually_exclusive=[("delete", "revert"), ("delete", "update"), ("revert", "update"), + ("clone", "update"), ("clone", "delete"), ("clone", "revert")], + required_one_of=[( + "name", + "vmid", + )], + required_if=[("state", "present", ["node"])] + ) + + if not HAS_PROXMOXER: + module.fail_json(msg="proxmoxer required for this module") + + api_user = module.params["api_user"] + api_host = module.params["api_host"] + api_password = module.params["api_password"] + clone = module.params["clone"] + cpu = module.params["cpu"] + cores = module.params["cores"] + delete = module.params["delete"] + memory = module.params["memory"] + name = module.params["name"] + newid = module.params["newid"] + node = module.params["node"] + revert = module.params["revert"] + sockets = module.params["sockets"] + state = module.params["state"] + timeout = module.params["timeout"] + update = bool(module.params["update"]) + vmid = module.params["vmid"] + validate_certs = module.params["validate_certs"] + + # If password not set get it from PROXMOX_PASSWORD env + if not api_password: + try: + api_password = os.environ["PROXMOX_PASSWORD"] + except KeyError: + module.fail_json( + msg="You should set api_password param or use " + "PROXMOX_PASSWORD environment variable" + ) + + try: + proxmox = ProxmoxAPI( + api_host, user=api_user, password=api_password, verify_ssl=validate_certs + ) + global VZ_TYPE + global PVE_MAJOR_VERSION # noqa + global PVE_FULL_VERSION # noqa + PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()["version"]) < 4.0 else 4 + PVE_FULL_VERSION = float(proxmox.version.get()["version"]) + except Exception as e: + module.fail_json( + msg="authorization on proxmox cluster failed with 
exception: {}".format(e) + ) + + # If vmid not set get the Next VM id from ProxmoxAPI + # If vm name is set get the VM id from ProxmoxAPI + if not vmid: + if state == "present" and (not update and not clone) and (not delete and not revert): + try: + vmid = get_nextvmid(module, proxmox) + except Exception: + module.fail_json( + msg="Can't get the next vmid for VM{} automatically." + " Ensure your cluster state is good".format(name) + ) + else: + try: + if not clone: + vmid = get_vmid(proxmox, name)[0] + else: + vmid = get_vmid(proxmox, clone)[0] + except Exception: + if not clone: + module.fail_json(msg="VM {} does not exist in cluster.".format(name)) + else: + module.fail_json(msg="VM {} does not exist in cluster.".format(clone)) + + if clone is not None: + if get_vmid(proxmox, name): + module.exit_json(changed=False, msg="VM with name <{}> already exists".format(name)) + if vmid is not None: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg="VM with vmid = {} does not exist in cluster".format(vmid)) + if not newid: + try: + newid = get_nextvmid(module, proxmox) + except Exception: + module.fail_json( + msg="Can't get the next vmid for VM {} automatically." + " Ensure your cluster state is good".format(name) + ) + else: + vm = get_vm(proxmox, newid) + if vm: + module.exit_json( + changed=False, + msg="vmid {0} with VM name {1} already exists".format(newid, name) + ) + + if delete is not None: + try: + settings(module, proxmox, vmid, node, name, timeout, delete=delete) + module.exit_json( + changed=True, msg="Settings has deleted on VM {} with vmid {}".format(name, vmid) + ) + except Exception as e: + module.fail_json( + msg="Unable to delete settings on VM {} with vmid {}: {}". 
+ format(name, vmid, str(e)) + ) + elif revert is not None: + try: + settings(module, proxmox, vmid, node, name, timeout, revert=revert) + module.exit_json( + changed=True, msg="Settings has reverted on VM {} with vmid {}".format(name, vmid) + ) + except Exception as e: + module.fail_json( + msg="Unable to revert settings on VM {} with vmid {}:" + " Maybe is not a pending task... ".format(name, vmid) + str(e) + ) + + if state == "present": + try: + if get_vm(proxmox, vmid) and not (update or clone): + module.exit_json( + changed=False, msg="VM with vmid <{}> already exists".format(vmid) + ) + elif get_vmid(proxmox, name) and not (update or clone): + module.exit_json( + changed=False, msg="VM with name <{}> already exists".format(name) + ) + elif not (node, name): + module.fail_json(msg="node, name is mandatory for creating/updating vm") + elif not node_check(proxmox, node): + module.fail_json(msg="node '{}' does not exist in cluster".format(node)) + + create_vm( + module, + proxmox, + vmid, + newid, + node, + name, + memory, + cpu, + cores, + sockets, + timeout, + update, + acpi=module.params["acpi"], + agent=module.params["agent"], + autostart=module.params["autostart"], + balloon=module.params["balloon"], + bios=module.params["bios"], + boot=module.params["boot"], + bootdisk=module.params["bootdisk"], + ciuser=module.params["ciuser"], + cipassword=module.params["cipassword"], + citype=module.params["citype"], + cpulimit=module.params["cpulimit"], + cpuunits=module.params["cpuunits"], + description=module.params["description"], + digest=module.params["digest"], + force=module.params["force"], + freeze=module.params["freeze"], + hostpci=module.params["hostpci"], + hotplug=module.params["hotplug"], + hugepages=module.params["hugepages"], + ide=module.params["ide"], + ipconfig=module.params["ipconfig"], + keyboard=module.params["keyboard"], + kvm=module.params["kvm"], + localtime=module.params["localtime"], + lock=module.params["lock"], + 
machine=module.params["machine"], + migrate_downtime=module.params["migrate_downtime"], + migrate_speed=module.params["migrate_speed"], + nameserver=module.params["nameserver"], + net=module.params["net"], + numa=module.params["numa"], + numa_enabled=module.params["numa_enabled"], + onboot=module.params["onboot"], + ostype=module.params["ostype"], + parallel=module.params["parallel"], + pool=module.params["pool"], + protection=module.params["protection"], + reboot=module.params["reboot"], + sata=module.params["sata"], + scsi=module.params["scsi"], + scsihw=module.params["scsihw"], + searchdomain=module.params["searchdomain"], + serial=module.params["serial"], + shares=module.params["shares"], + skiplock=module.params["skiplock"], + smbios1=module.params["smbios"], + snapname=module.params["snapname"], + sshkeys=module.params["sshkeys"], + startdate=module.params["startdate"], + startup=module.params["startup"], + tablet=module.params["tablet"], + target=module.params["target"], + tdf=module.params["tdf"], + template=module.params["template"], + vcpus=module.params["vcpus"], + vga=module.params["vga"], + virtio=module.params["virtio"], + watchdog=module.params["watchdog"] + ) + + if not clone: + get_vminfo( + module, + proxmox, + node, + vmid, + ide=module.params["ide"], + net=module.params["net"], + sata=module.params["sata"], + scsi=module.params["scsi"], + virtio=module.params["virtio"] + ) + if update: + module.exit_json( + changed=True, msg="VM {0} with vmid {1} updated".format(name, vmid) + ) + elif clone is not None: + module.exit_json( + changed=True, + msg="VM {0} with newid {1} cloned from vm with vmid {2}".format( + name, newid, vmid + ) + ) + else: + module.exit_json( + changed=True, + msg="VM {0} with vmid {1} deployed".format(name, vmid), + **results # noqa + ) + except Exception as e: + if update: + module.fail_json( + msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e) + ) + elif clone is not None: + module.fail_json( + 
msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e) + ) + else: + message = ( + "creation of {0} VM {1} with vmid {2} failed with exception={3}".format( + VZ_TYPE, name, vmid, e + ) + ) + module.fail_json(msg=message) + + elif state == "started": + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg="VM with vmid <{}> does not exist in cluster".format(vmid)) + if getattr(proxmox.nodes(vm[0]["node"]), + VZ_TYPE)(vmid).status.current.get()["status"] == "running": + module.exit_json(changed=False, msg="VM {} is already running".format(vmid)) + + if start_vm(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM {} started".format(vmid)) + except Exception as e: + module.fail_json(msg="starting of VM {0} failed with exception: {1}".format(vmid, e)) + + elif state == "stopped": + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg="VM with vmid={} does not exist in cluster".format(vmid)) + + if getattr(proxmox.nodes(vm[0]["node"]), + VZ_TYPE)(vmid).status.current.get()["status"] == "stopped": + module.exit_json(changed=False, msg="VM {} is already stopped".format(vmid)) + + if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params["force"]): + module.exit_json(changed=True, msg="VM {} is shutting down".format(vmid)) + except Exception as e: + module.fail_json(msg="stopping of VM {0} failed with exception: {1}".format(vmid, e)) + + elif state == "restarted": + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg="VM with vmid={} does not exist in cluster".format(vmid)) + if getattr(proxmox.nodes(vm[0]["node"]), + VZ_TYPE)(vmid).status.current.get()["status"] == "stopped": + module.exit_json(changed=False, msg="VM {} is not running".format(vmid)) + + if ( + stop_vm(module, proxmox, vm, vmid, timeout, force=module.params["force"]) + and start_vm(module, proxmox, vm, vmid, timeout) + ): + module.exit_json(changed=True, msg="VM {} is restarted".format(vmid)) + except 
Exception as e: + module.fail_json(msg="restarting of VM {0} failed with exception: {1}".format(vmid, e)) + + elif state == "absent": + try: + vm = get_vm(proxmox, vmid) + node = proxmox.nodes(vm[0]["node"]) + if not vm: + module.exit_json(changed=False, msg="VM {} does not exist".format(vmid)) + + if getattr(node, VZ_TYPE)(vmid).status.current.get()["status"] == "running": + module.exit_json( + changed=False, msg="VM {} is running. Stop it before deletion.".format(vmid) + ) + + taskid = getattr(node, VZ_TYPE).delete(vmid) + while timeout: + + if ( + node.tasks(taskid).status.get()["status"] == "stopped" + and (node.tasks(taskid).status.get()["exitstatus"] == "OK") + ): + module.exit_json(changed=True, msg="VM {} removed".format(vmid)) + timeout -= 1 + if timeout == 0: + message = ( + "Reached timeout while waiting for removing VM." + " Last line in task before timeout: {}".format( + node.tasks(taskid).log.get()[:1] + ) + ) + module.fail_json(msg=message) + + time.sleep(1) + except Exception as e: + module.fail_json(msg="deletion of VM {0} failed with exception: {1}".format(vmid, e)) + + elif state == "current": + status = {} + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg="VM with vmid={} does not exist in cluster".format(vmid)) + current = getattr(proxmox.nodes(vm[0]["node"]), + VZ_TYPE)(vmid).status.current.get()["status"] + status["status"] = current + if status: + module.exit_json( + changed=False, + msg="VM {0} with vmid={1} is {2}".format(name, vmid, current), + **status + ) + except Exception as e: + module.fail_json( + msg="Unable to get vm {} with vmid = {} status: ".format(name, vmid) + str(e) + ) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ucr.py b/plugins/modules/ucr.py new file mode 100644 index 0000000..87229eb --- /dev/null +++ b/plugins/modules/ucr.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +"""Module to control Univention Corporate Registry.""" + +ANSIBLE_METADATA = {"metadata_version": "1.1", 
"status": ["preview"], "supported_by": "community"} + +DOCUMENTATION = """ +--- +module: ucr +short_description: Manage variables in univention configuration registry. +version_added: "2.6" +description: + - "This module allows to manage variables inside the univention configuration registry + on a univention corporate server (UCS)." +options: + path: + description: + - Path for the variable + required: True + default: null + value: + description: + - New value of the variable + required: False + state: + required: False + default: "present" + choices: ["present", "absent"] + description: + - Whether the variable should be exist or not. +author: + - Robert Kaussow (@xoxys) +""" + +EXAMPLES = """ +# Set variable to force https in ucs frontend +- name: Force https + ucr: + path: apache2/force_https + value: yes + +# Allow another user as root to login as ssh +- name: Add ssh user + ucr: + path: auth/sshd/user/myuser + value: yes +""" + +RETURN = """ +original_message: + description: The original name param that was passed in + type: str +message: + description: The output message that the sample module generates +""" + +from ansible.module_utils.basic import AnsibleModule +from univention.config_registry import ConfigRegistry # noqa +from univention.config_registry.frontend import ucr_update # noqa + + +def get_variable(ucr, path): + ucr.load() + if path in ucr: + value = ucr.get(path) + else: + value = None + return value + + +def set_variable(ucr, path, value, result): + org_value = get_variable(ucr, path) + ucr_update(ucr, {path: value}) + new_value = get_variable(ucr, path) + return not org_value == new_value + + +def dry_variable(ucr, path, value, result): + org_value = get_variable(ucr, path) + return not org_value == value + + +def main(): + ucr = ConfigRegistry() + + module_args = dict( + path=dict(type="str", required=True, aliases=["name"]), + value=dict(type="str", required=False, default=""), + state=dict(default="present", choices=["present", "absent"], 
type="str") + ) + + required_if = [["state", "present", ["value"]]] + + module = AnsibleModule( + argument_spec=module_args, supports_check_mode=True, required_if=required_if + ) + + result = dict(changed=False, original_message="", message="") + + path = module.params["path"] + value = module.params["value"] + if module.params["state"] == "present": + if value is None or value == "None": + value = "" + elif module.params["state"] == "absent": + value = None + + if not module.check_mode: + result["changed"] = set_variable(ucr, path, value, result) + else: + result["changed"] = dry_variable(ucr, path, value, result) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/setup.cfg b/setup.cfg index 1789a04..6d6fb61 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,7 +4,7 @@ known_first_party = ansiblelater sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER force_single_line = true line_length = 99 -skip_glob = **/.env*,**/env/*,**/docs/*,**/inventory/* +skip_glob = **/.env*,**/env/*,**/docs/*,**/inventory/*,**/modules/* [yapf] based_on_style = google diff --git a/test/unit/requirements.txt b/test/unit/requirements.txt index c199d3d..9afe0a6 100644 --- a/test/unit/requirements.txt +++ b/test/unit/requirements.txt @@ -1,5 +1,11 @@ ansible -# requirement for the proxmox module +# requirement for the proxmox modules proxmoxer requests + +# requirement for the corenetworks modules +corenetworks + +# requirement for the openssl_pkcs12 module +pyOpenSSL