/logs
/rtp/
/wrlinux-x/
+/okd/fetched
+
+# ignored anywhere
+fetched/
+tmp/
+temp/
+venv/
# documentation
.tox
# editor
.vscode
+rtp.code-workspace
+
[defaults]
roles_path = ./roles
+inventory = ./inventory
+playbook_dir = ./playbooks
+collections_paths = ./collections
remote_tmp = /tmp
+# NOTE: command_warnings is deprecated and removed in ansible-core 2.14; drop on upgrade
+command_warnings = True
+ansible_managed = "This file is managed by Ansible - changes may be lost"
+retry_files_enabled = False
+forks = 8
+stdout_callback = yaml
+callback_whitelist = debug,profile_tasks
[privilege_escalation]
become_method = sudo
--- /dev/null
+---
+- name: Setup DNS Records
+ hosts: dns_host
+ gather_facts: false
+ roles:
+ - role: insert_dns_records
+ when: setup_dns_service | bool
--- /dev/null
+---
+- name: Install and start the http_store service
+ hosts: http_store
+ gather_facts: false
+ roles:
+ - role: setup_http_store
+ when: setup_http_store_service | bool
\ No newline at end of file
--- /dev/null
+---
+- name: Setup NTP
+ hosts: ntp_host
+ gather_facts: false
+ roles:
+ - role: setup_ntp
+ when: setup_ntp_service | bool
+
+---
collections:
- ansible.posix
- ansible.utils
+ - containers.podman
+ - community.crypto
- community.general
- community.libvirt
- kubernetes.core
--- /dev/null
+# Insert DNS Records roles
+
+Sets up `dnsmasq` (either directly or via `NetworkManager`), inserting the DNS A records required for OpenShift install.
+
+## Role Variables
+| Variable | Required | Default | Options | Comments |
+| --------------------- | -------- | -------------- | ----------------------- | ----------------------------------------------------------- |
+| domain | yes | | | base for the DNS entries |
+| dns_entries_file_name | no | dnsmasq.{{ cluster_name }}.conf | | |
+| dns_service_name | no | NetworkManager | NetworkManager, dnsmasq | the name of the service you want to manage your DNS records |
+| node_dns_records | no | | | dns records for the nodes of the OpenShift cluster |
+| extra_dns_records | no | | | used to define additional DNS records beyond the generated node records |
+
+The structure of `node_dns_records` and `extra_dns_records` is the same and as follows:
+
+```yaml
+node_dns_records:
+ master-0:
+ address: "<node.cluster.domain>"
+ ip: "<ip>"
+extra_dns_records:
+ place-0:
+ name: "place-0"
+ address: "<address>"
+ ip: "<ip>"
+ use_dhcp: false
+```
+
+## Example Playbook
+
+```yaml
+- name: Setup DNS Records
+ hosts: dns_host
+ roles:
+ - insert_dns_records
+ vars:
+ domain: "cluster.example.com"
+ node_dns_records:
+ master-0:
+ name: "master-0"
+ address: "master-0.cluster.example.com"
+ ip: "111.111.111.111"
+ use_dhcp: false
+ master-1:
+ name: "master-1"
+ address: "master-1.cluster.example.com"
+ ip: "111.111.111.112"
+ use_dhcp: false
+ master-2:
+ name: "master-2"
+ address: "master-2.cluster.example.com"
+ ip: "111.111.111.113"
+ use_dhcp: false
+```
--- /dev/null
+write_dnsmasq_config: true
+domain: "{{ cluster_name }}.{{ base_dns_domain }}"
+host_ip_keyword: "ansible_host"
+dns_entries_file_name: "{{ 'dnsmasq.' + cluster_name + '.conf' }}"
+dns_bmc_domain: "infra.{{ base_dns_domain }}"
+dns_bmc_address_suffix: "-bmc.{{ dns_bmc_domain }}"
+dns_service_name: NetworkManager
+dns_records:
+ apps:
+ address: ".apps.{{ domain }}"
+ ip: "{{ ingress_vip }}"
+ api:
+ address: "api.{{ domain }}"
+ ip: "{{ api_vip }}"
+ api_int:
+ address: "api-int.{{ domain }}"
+ ip: "{{ api_vip }}"
+
+node_dns_records: {}
+extra_dns_records: {}
+
+use_pxe: false
+use_dhcp: false
+dhcp_lease_time: 24h
+
+listen_address: "{{ ansible_default_ipv4.address }}"
+listen_addresses:
+ - "127.0.0.1"
+ - "{{ listen_address }}"
+
+
+
+required_domains:
+ "api": "api.{{ domain }}"
+ "api-int": "api-int.{{ domain }}"
+ "apps": "*.apps.{{ domain }}"
+
+expected_answers:
+ "api": "{{ api_vip }}"
+ "api-int": "{{ api_vip }}"
+ "apps": "{{ ingress_vip }}"
+
+required_binary: dig
+required_binary_provided_in_package: bind-utils
+# NOTE: duplicate "domain" key removed — it is already defined (identically) near the top of this file
--- /dev/null
+[main]
+dns=dnsmasq
--- /dev/null
+- name: "Restart {{ dns_service_name }}"
+ ansible.builtin.service:
+ name: "{{ dns_service_name }}"
+ state: restarted
+ async: 45
+ poll: 5
+ listen: restart_dns
+ become: true
--- /dev/null
+- name: "Check required domain {{ item.key }} exists"
+ ansible.builtin.shell:
+ cmd: "{{ required_binary }} {{ item.value }} A {{ item.value }} AAAA +short"
+ register: res
+ changed_when: false
+
+- name: Check stdout for expected IP address
+ ansible.builtin.set_fact:
+ failed_domains: "{{ (failed_domains | default({})) | combine(
+ {item.value: {
+ 'stdout': res.stdout,
+ 'stderr': res.stderr,
+ 'expected': expected_answers[item.key],
+ }}
+ ) }}"
+ when: expected_answers[item.key] not in res.stdout
--- /dev/null
+---
+- name: Open port in firewall for DNS
+ ansible.posix.firewalld:
+ port: "53/udp"
+ permanent: true
+ immediate: true
+ state: enabled
+ zone: "{{ item }}"
+ loop:
+ - internal
+ - public
+
+- name: Open port in firewall for DHCP
+ ansible.posix.firewalld:
+ port: "67/udp"
+ permanent: true
+ immediate: true
+ state: enabled
+ zone: "{{ item }}"
+ loop:
+ - internal
+ - public
+ when: use_dhcp | bool
--- /dev/null
+---
+- name: Create_host_entry - Make sure ansible_fqdn is populated if required.
+ ansible.builtin.setup:
+ delegate_to: "{{ entry_name }}"
+ delegate_facts: true
+ when:
+ - entry_extra_check | default(true)
+ - hostvars[entry_name]['ansible_fqdn'] is not defined
+
+- name: Create_host_entry - "Populate dns entry for {{ entry_name }}"
+ ansible.builtin.set_fact:
+ other_host_dns_records: "{{ (other_host_dns_records | default({})) | combine(
+ {
+ entry_address : {
+ 'name': (other_host_dns_records[entry_address]['name'] | default([])) + [entry_name],
+ 'address': entry_address,
+ 'ip': hostvars[entry_name][host_ip_keyword],
+ }
+ }
+ ) }}"
--- /dev/null
+---
+- name: Create dnsmasq domain config file
+ become: true
+ ansible.builtin.template:
+ src: openshift-cluster.conf.j2
+ dest: "/etc/dnsmasq.d/{{ dns_entries_file_name }}"
+ mode: "0644"
+ notify: restart_dns
+
+- name: Start dnsmasq
+ become: true
+ ansible.builtin.service:
+ name: dnsmasq
+ state: started
+ enabled: true
--- /dev/null
+---
+- name: Gather facts
+ ansible.builtin.setup:
+ gather_subset: all
+
+- name: Get node_records for nodes
+ ansible.builtin.set_fact:
+ node_dns_records: "{{ (node_dns_records | default({})) | combine(
+ {
+ item: {
+ 'name': item,
+ 'address': item + '.' + cluster_name + '.' + base_dns_domain,
+ 'ip': hostvars[item][hostvars[item]['host_ip_keyword'] | default(host_ip_keyword)],
+ 'mac': hostvars[item]['mac'] | default(False),
+ 'use_dhcp': hostvars[item]['ip'] | default('dhcp') == 'dhcp',
+ }
+ }
+ ) }}"
+ loop: "{{ groups['nodes'] }}"
+ when: hostvars[item][hostvars[item]['host_ip_keyword'] | default(host_ip_keyword)] is defined
+
+- name: Get node_records for node bmc_addresses when it is an IP address
+ ansible.builtin.set_fact:
+ bmc_dns_records: "{{ (bmc_dns_records | default({})) | combine(
+ {
+ item: {
+ 'name': item,
+ 'address': item + dns_bmc_address_suffix,
+ 'ip': hostvars[item]['bmc_ip'],
+ }
+ } ) }}"
+ loop: "{{ groups['nodes'] }}"
+ when:
+ - hostvars[item]['bmc_ip'] is defined
+ - hostvars[item]['bmc_ip'] | ansible.utils.ipaddr('bool')
+
+- name: Define bmc_address where required
+ ansible.builtin.set_fact:
+ bmc_address: "{{ item.data.address }}"
+ delegate_to: "{{ item.host }}"
+ delegate_facts: true
+ loop: "{{ bmc_dns_records | dict2items(key_name='host', value_name='data') }}"
+ when:
+ - bmc_dns_records is defined
+
+- name: Block for creating host entries
+ block:
+ - name: Create host records for Bastions and Services
+ ansible.builtin.include_tasks: create_host_entry.yml
+ vars:
+ entry_address: "{{ hostvars[item]['ansible_fqdn'] }}"
+ entry_name: "{{ item }}"
+ loop: "{{ groups['bastions'] + groups['services'] }}"
+ when:
+ - item != 'registry_host'
+ - hostvars[item][hostvars[item]['host_ip_keyword'] | default(host_ip_keyword)] | ansible.utils.ipaddr('bool')
+ - not (hostvars[item]['dns_skip_record'] | default(False)) | bool
+
+ - name: Create host records for Registry
+ ansible.builtin.include_tasks: create_host_entry.yml
+ vars:
+ entry_address: "{{ hostvars['registry_host']['registry_fqdn'] | default(hostvars['registry_host']['ansible_fqdn']) }}"
+ entry_name: "registry_host"
+ entry_extra_check: "{{ hostvars['registry_host']['registry_fqdn'] is not defined }}"
+ when:
+ - "'registry_host' in hostvars"
+ - hostvars['registry_host'][hostvars['registry_host']['host_ip_keyword'] | default(host_ip_keyword)] | ansible.utils.ipaddr('bool')
+ - not (hostvars['registry_host']['dns_skip_record'] | default(False)) | bool
+
+ - name: Create host records for Vm_hosts
+ ansible.builtin.include_tasks: create_host_entry.yml
+ vars:
+ entry_address: "{{ hostvars[item]['sushy_fqdn'] | default(hostvars[item]['ansible_fqdn']) }}"
+ entry_name: "{{ item }}"
+ entry_extra_check: "{{ hostvars[item]['sushy_fqdn'] is not defined }}"
+ loop: "{{ groups['vm_hosts'] | default([]) }}"
+ when: >-
+ hostvars[item][hostvars[item]['host_ip_keyword'] | default(host_ip_keyword)] |
+ ansible.utils.ipaddr('bool') and (not (hostvars[item]['dns_skip_record'] | default(False))) | bool
+
+- name: Configure firewall
+ become: true
+ ansible.builtin.import_tasks: configure_firewall.yml
+
+- name: Install dnsmasq
+ become: true
+ ansible.builtin.package:
+ name: dnsmasq
+ state: present
+
+- name: Configure dnsmasq to run under NetworkManager
+ become: true
+ ansible.builtin.import_tasks: network-manager.yml
+ when: dns_service_name == "NetworkManager"
+
+- name: Configure dnsmasq to run stand alone
+ become: true
+ ansible.builtin.import_tasks: dnsmasq.yml
+ when: dns_service_name == "dnsmasq"
+
+- name: Validate DNS records
+ ansible.builtin.include_tasks: validate.yml
+ loop: "{{ required_domains | dict2items() }}"
--- /dev/null
+---
+- name: Setup network manager to run dnsmasq
+ become: true
+ ansible.builtin.copy:
+ src: nm-dnsmasq.conf
+ dest: /etc/NetworkManager/conf.d/dnsmasq.conf
+ mode: "0644"
+
+- name: Create dnsmasq openshift-cluster config file
+ become: true
+ ansible.builtin.template:
+ src: openshift-cluster.conf.j2
+ dest: "/etc/NetworkManager/dnsmasq.d/{{ dns_entries_file_name }}"
+ mode: "0644"
+ notify: restart_dns
+
+- name: Start NetworkManager
+ become: true
+ ansible.builtin.service:
+ name: NetworkManager
+ state: started
+ enabled: true
+
+- name: Reload NetworkManager
+ become: true
+ ansible.builtin.service:
+ name: NetworkManager
+ state: reloaded
--- /dev/null
+- name: Check if the required binary for testing exists
+ ansible.builtin.shell:
+ cmd: "which {{ required_binary }}"
+ register: required_binary_check
+ ignore_errors: true
+ changed_when: false
+
+- name: (if binary is missing) Install the package providing the required binary
+ become: true
+ ansible.builtin.package:
+ name: "{{ required_binary_provided_in_package }}"
+ state: present
+ when: required_binary_check.rc != 0
+
+- name: Set initial failed_domains
+ ansible.builtin.set_fact:
+ failed_domains: {}
+
+- name: "Check required domain {{ item.key }} exists"
+ ansible.builtin.shell:
+ cmd: "{{ required_binary }} {{ item.value }} A {{ item.value }} AAAA +short"
+ register: res
+ changed_when: false
+
+- name: Check stdout for expected IP address
+ ansible.builtin.set_fact:
+ failed_domains: "{{ (failed_domains | default({})) | combine(
+ {item.value: {
+ 'stdout': res.stdout,
+ 'stderr': res.stderr,
+ 'expected': expected_answers[item.key],
+ }}
+ ) }}"
+ when: expected_answers[item.key] not in res.stdout
+
+- name: List failed_domains
+ ansible.builtin.fail:
+ msg: |
+ Failed domains:
+ {% for failed in (failed_domains | dict2items) %}
+ {{ failed.key }}:
+ expected:
+ {{ failed.value.expected | indent(14) }}
+ stdout:
+ {{ failed.value.stdout | indent(14)}}
+ stderr:
+ {{ failed.value.stderr | indent(14) }}
+ {% endfor %}
+ when: failed_domains | length > 0
--- /dev/null
+domain={{ domain }}
+{% if write_dnsmasq_config %}
+domain-needed
+bogus-priv
+listen-address={{ listen_addresses | join(',') }}
+{% for listening_intf in (listening_interfaces | default([])) %}
+interface={{ listening_intf }}
+{% endfor %}
+{% for no_dhcp_intf in (no_dhcp_interfaces | default([])) %}
+no-dhcp-interface={{ no_dhcp_intf }}
+{% endfor %}
+expand-hosts
+{% if upstream_dns | default(False) %}
+server={{ upstream_dns }}
+{% endif %}
+{% endif %}
+
+{% if use_dhcp %}
+dhcp-range= tag:{{ cluster_name }},{{ dhcp_range_first }},{{ dhcp_range_last }}
+dhcp-option= tag:{{ cluster_name }},option:netmask,{{ (gateway + '/' + prefix | string) | ansible.utils.ipaddr('netmask') }}
+dhcp-option= tag:{{ cluster_name }},option:router,{{ gateway }}
+dhcp-option= tag:{{ cluster_name }},option:dns-server,{{ listen_address }}
+dhcp-option= tag:{{ cluster_name }},option:domain-search,{{ domain }}
+dhcp-option= tag:{{ cluster_name }},option:ntp-server,{{ ntp_server }}
+{% endif %}
+
+# Wildcard for apps and other api domains
+{% for item in dns_records.values() %}
+address=/{{ item.address }}/{{ item.ip }}
+{% endfor %}
+
+# Node addresses
+{% for item in node_dns_records.values() %}
+# {{ item.name }}
+{% if item.use_dhcp %}
+dhcp-host={{item.mac}},{{ item.ip }},{{ item.address }}, set:{{ cluster_name }}
+{% endif %}
+address=/{{ item.address }}/{{ item.ip }}
+ptr-record={{ item.ip.split('.')[::-1] | join('.') }}.in-addr.arpa,{{ item.address }}
+
+{% endfor %}
+
+{% if bmc_dns_records is defined %}
+# Node BMC addresses
+{% for item in bmc_dns_records.values() %}
+# {{ item.name }}
+address=/{{ item.address }}/{{ item.ip }}
+ptr-record={{ item.ip.split('.')[::-1] | join('.') }}.in-addr.arpa,{{ item.address }}
+
+{% endfor %}
+
+{% endif %}
+{% if other_host_dns_records is defined %}
+# Bastions, services and vm_hosts
+{% for item in other_host_dns_records.values() %}
+# {{ item.name | join(', ') }}
+address=/{{ item.address }}/{{ item.ip }}
+ptr-record={{ item.ip.split('.')[::-1] | join('.') }}.in-addr.arpa,{{ item.address }}
+
+{% endfor %}
+
+{% endif %}
+# User provided entries
+{% for item in extra_dns_records.values() %}
+# {{ item.name }}
+{% if item.use_dhcp %}
+dhcp-host={{item.mac}},{{ item.ip }},{{ item.address }}, set:{{ cluster_name }}
+{% endif %}
+address=/{{ item.address }}/{{ item.ip }}
+ptr-record={{ item.ip.split('.')[::-1] | join('.') }}.in-addr.arpa,{{ item.address }}
+
+{% endfor %}
--- /dev/null
+# Setup HTTP Store
+
+Sets up a web host which can be used to distribute ISO's for `boot_iso` role
+
+## Role Variables
+
+| Variable | Required | Default | Comments |
+|---------------------------|----------|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
+| http_store_container_name | no | http_store | |
+| http_store_pod_name | no | http_store_pod | |
+| http_dir | no | /opt/http_store | |
+| http_data_dir | no | "{{ http_dir }}/data" | |
+| container_image           | no       | quay.io/fedora/httpd-24:latest                           | If you change this to anything other than the same image on a different host you may need to change the environment vars in the task |
+| http_store_dir            | no       | /opt/http_store/data                                     | |
+| http_port                 | no       | int 80                                                   | |
+| http_host                 | no       | {{ 'http://' + hostvars['http_store']['ansible_host'] }} | |
+| test_file_name            | no       | "http_test"                                              | |
+
+## Dependencies
+
+- containers.podman
+
+## Example Playbook
+
+```yaml
+- name: Install and start the http_store service
+ hosts: http_store
+ roles:
+ - setup_http_store
+ vars:
+ http_store_container_name: "iso store"
+```
--- /dev/null
+http_store_container_name: http_store
+http_store_pod_name: http_store_pod
+http_dir: /opt/http_store
+http_data_dir: "{{ http_dir }}/data"
+http_port: 80
+# Note if you change this you might have to change the env vars and volumes for podman task
+container_image: quay.io/fedora/httpd-24:latest
+file_owner: "{{ ansible_env.USER }}"
+file_group: "{{ file_owner }}"
+
+http_store_dir: "{{ iso_download_dest_path | default('/opt/http_store/data') }}"
+# NOTE: duplicate "http_port: 80" removed — it is already defined above (duplicate YAML keys are silently last-wins)
+http_host: "{{ discovery_iso_server | default('http://' + hostvars['http_store']['ansible_host']) }}:{{ http_port }}"
+test_file_name: http_test
--- /dev/null
+---
+- name: Check if podman is installed
+ ansible.builtin.stat:
+ path: /usr/bin/podman
+ register: result
+
+- name: Install podman
+ become: true
+ ansible.builtin.package:
+ name: podman
+ state: present
+ when: not result.stat.exists | bool
+
+- name: Create directory to hold the registry files
+ become: true
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ file_owner }}"
+ group: "{{ file_group }}"
+ mode: "0775"
+ recurse: true
+ loop:
+ - "{{ http_dir }}"
+ - "{{ http_data_dir }}"
+
+- name: Create httpd pod
+ become: true
+ containers.podman.podman_pod:
+ name: "{{ http_store_pod_name }}"
+ publish:
+ - "{{ http_port }}:8080"
+ register: pod_info
+
+- ansible.builtin.debug: # noqa unnamed-task
+ var: pod_info
+ verbosity: 1
+
+- name: Create httpd container
+ become: true
+ containers.podman.podman_container:
+ name: "{{ http_store_container_name }}"
+ image: "{{ container_image }}"
+ pod: "{{ http_store_pod_name }}"
+ state: stopped
+ volumes:
+ - "{{ http_data_dir }}:/var/www/html:z"
+ register: container_info
+
+- name: Setting facts about container
+ ansible.builtin.set_fact:
+ http_store_name: "{{ container_info.container.Name }}"
+ http_store_pidfile: "{{ container_info.container.ConmonPidFile }}"
+
+- name: Copy the systemd service file
+ become: true
+ ansible.builtin.template:
+ src: http_store.service.j2
+ dest: "/etc/systemd/system/http_store.service"
+ owner: root
+ group: root
+ mode: "0644"
--- /dev/null
+---
+- name: Install firewalld #noqa: literal-compare
+ become: true
+ block:
+ - name: Collect service facts
+ ansible.builtin.service_facts:
+ - name: Is firewalld installed
+ ansible.builtin.assert:
+ that: '"firewalld.service" in ansible_facts.services'
+ quiet: true
+ rescue:
+ - name: Installing firewalld
+ ansible.builtin.package:
+ name: firewalld
+ state: present
+
+- name: Start firewalld
+ become: true
+ ansible.builtin.service:
+ name: firewalld.service
+ state: started
+ enabled: true
+
+- name: Firewalld open tcp port http persistent immediate enabled zones internal, and public
+ become: true
+ ansible.posix.firewalld:
+ port: "{{ http_port }}/tcp"
+ permanent: true
+ immediate: true
+ state: enabled
+ zone: "{{ item }}"
+ loop:
+ - internal
+ - public
--- /dev/null
+---
+- name: Gather facts
+ ansible.builtin.setup:
+ gather_subset: all
+
+
+- name: Configure firewall
+ ansible.builtin.include_tasks: firewalld.yml
+
+- name: Configure http container
+ ansible.builtin.include_tasks: container.yml
+
+- name: Starting and Enabling the http_store.service
+ become: true
+ ansible.builtin.systemd:
+ name: http_store
+ enabled: true
+ state: started
+ scope: system
+ daemon_reexec: true
+ daemon_reload: true
+
+- name: Validate http deployment
+ ansible.builtin.include_tasks: validate.yml
+
--- /dev/null
+---
+- name: Set test file contents
+ ansible.builtin.set_fact:
+ contents: "{{ lookup('template', 'test_file.j2') }}"
+
+- name: Copy file to http_store
+ ansible.builtin.copy:
+ content: "{{ contents }}"
+ dest: "{{ http_store_dir }}/{{ test_file_name }}"
+ mode: "0644"
+ setype: httpd_sys_content_t
+ become: true
+ delegate_to: http_store
+
+- name: Retrieve file from http_store
+ ansible.builtin.uri:
+ url: "{{ http_host }}/{{ test_file_name }}"
+ return_content: true
+ register: response
+ delegate_to: bastion
+
+- name: Check content matches
+ ansible.builtin.assert:
+ that: response.content == contents
+ quiet: true
+
+- name: Remove file on http_store
+ ansible.builtin.file:
+ path: "{{ http_store_dir }}/{{ test_file_name }}"
+ state: absent
+ become: true
+ delegate_to: http_store
+
--- /dev/null
+[Unit]
+Description=Podman http_store.service
+[Service]
+Restart=on-failure
+ExecStart=/usr/bin/podman pod start {{ http_store_pod_name }}
+ExecStop=/usr/bin/podman pod stop -t 10 {{ http_store_pod_name }}
+KillMode=none
+Type=forking
+PIDFile={{ http_store_pidfile }}
+[Install]
+WantedBy=default.target
--- /dev/null
+{{ 99999999 | random | to_uuid }}
--- /dev/null
+# setup_ntp
+
+Deploys and configures chrony
--- /dev/null
+---
+ntp_pool_servers:
+ - 0.us.pool.ntp.org
+ - 1.us.pool.ntp.org
+ - 2.us.pool.ntp.org
+ - 3.us.pool.ntp.org
+
+enable_logging: false
+
+ntp_server_allow: []
--- /dev/null
+---
+- name: Setup
+ ansible.builtin.setup:
+ gather_subset: all
+
+- name: Chrony block
+ become: true
+ block:
+ - name: Check if chrony is installed
+ ansible.builtin.service:
+ name: chronyd.service
+ enabled: true
+ rescue:
+ - name: Installing chrony
+ become: true
+ ansible.builtin.package:
+ name: chrony
+ state: present
+
+- name: Configure chrony
+ become: true
+ ansible.builtin.template:
+ src: chrony.conf.j2
+ dest: /etc/chrony.conf
+ owner: root
+ group: root
+ mode: "0644"
+ register: result
+
+- name: Start and enable chrony
+ become: true
+ ansible.builtin.service:
+ name: chronyd
+ state: started
+ enabled: true
+ when: not result.failed
+
+- name: Allow incoming traffic for service ntp, permanent and immediate, for public zone
+ become: true
+ ansible.posix.firewalld:
+ zone: public
+ service: ntp
+ permanent: true
+ state: enabled
+ immediate: true
+
+- name: Validating chronyd
+ become: true
+ ansible.builtin.command: chronyc ntpdata
+ register: result
+
+- name: Validating chrony - PASSED
+ ansible.builtin.debug:
+ msg: "Chrony validation PASSED."
+ when: not result.failed
--- /dev/null
+# {{ ansible_managed }}
+driftfile /var/lib/chrony/drift
+bindcmdaddress {{ ntp_server }}
+bindcmdaddress 127.0.0.1
+bindcmdaddress ::1
+keyfile /etc/chrony.keys
+local stratum 10
+rtcsync
+makestep 1.0 3
+manual
+{% if enable_logging %}
+logdir /var/log/chrony
+log measurements statistics tracking
+{% endif %}
+
+allow 127.0.0.1
+{% for server in ntp_server_allow %}
+allow {{ server }}
+{% endfor %}
+
+server 127.0.0.1
+{% for item in ntp_pool_servers %}
+server {{ item }}
+{% endfor %}