diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 89971ae..9f62022 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,7 +6,7 @@ stages: include: - project: 'developerdurp/jobtemplates' - ref: release/1.0.0 + ref: main file: - 'terraform.yml' - 'ansible.yml' @@ -32,6 +32,19 @@ validate: - if: $CI_PIPELINE_SOURCE == "merge_request_event" when: always - when: never + +ansible: + stage: lint + variables: + WORKDIR: $CI_PROJECT_DIR/ansible + REQUIREMENTS: requirements.yml + FILE: main.yml + allow_failure: false + extends: .ansible_lint + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + when: always + - when: never plan: stage: lint @@ -156,3 +169,4 @@ k3s-destroy: - 'ansible/*' when: always - when: never + diff --git a/ansible/dns.yml b/ansible/dns.yml index 54c00bb..129e06d 100644 --- a/ansible/dns.yml +++ b/ansible/dns.yml @@ -2,9 +2,9 @@ roles: - cloudflare vars: - DNS: + dns: - {record: 'bitwarden', zone: 'durp.info', proxied: 'yes', state: 'present'} - - {record: 'nextcloud', zone: 'durp.info', proxied: 'yes', state: 'present'} + - {record: 'nextcloud', zone: 'durp.info', proxied: 'yes', state: 'present'} - {record: 'grafana', zone: 'durp.info', proxied: 'yes', state: 'present'} - {record: 'kong', zone: 'durp.info', proxied: 'yes', state: 'present'} - {record: '@', zone: 'durp.info', proxied: 'yes', state: 'present'} @@ -16,5 +16,4 @@ - {record: 'docker', zone: 'durp.info', proxied: 'yes', state: 'present'} - {record: 'authentik', zone: 'durp.info', proxied: 'yes', state: 'present'} - {record: 'plex', zone: 'durp.info', proxied: 'yes', state: 'present'} - - {record: 'vault', zone: 'durp.info', proxied: 'yes', state: 'present'} - + - {record: 'vault', zone: 'durp.info', proxied: 'yes', state: 'present'} diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index 568e818..36b5800 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -13,7 +13,7 @@ flannel_iface: "eth0" apiserver_endpoint: "192.168.20.120" # 
k3s_token is required masters can talk together securely -k3s_token: "{{ lookup('env','k3s_token') }}" +k3s_token: "{{ lookup('env', 'k3s_token') }}" # The IP on which the node is reachable in the cluster. # Here, a sensible default is provided, you can still override @@ -42,7 +42,7 @@ extra_server_args: >- --kubelet-arg containerd=/run/k3s/containerd/containerd.sock --feature-gates RemoveSelfLink=false extra_agent_args: >- - {{ extra_args }} + {{ extra_args }} --kubelet-arg node-status-update-frequency=5s # image tag for kube-vip diff --git a/ansible/main.yml b/ansible/main.yml index e85f436..aece7f9 100644 --- a/ansible/main.yml +++ b/ansible/main.yml @@ -1,33 +1,32 @@ - hosts: all - gather_facts: yes - become: yes + gather_facts: true + become: true roles: - base - hosts: k3s_cluster - gather_facts: yes - become: yes + gather_facts: true + become: true roles: - k3s/prereq - k3s/download - hosts: master - become: yes - roles: + become: true + roles: - k3s/master - hosts: node - become: yes + become: true roles: - k3s/node - hosts: master - become: yes + become: true roles: - - role: k3s/post + - role: k3s/post - hosts: master[0] - become: yes + become: true roles: - k3s/argocd - diff --git a/ansible/roles/base/tasks/main.yml b/ansible/roles/base/tasks/main.yml index 0ac6434..ee8fc11 100644 --- a/ansible/roles/base/tasks/main.yml +++ b/ansible/roles/base/tasks/main.yml @@ -1,58 +1,57 @@ ---- - name: Run Package tasks - include_tasks: + ansible.builtin.include_tasks: file: ./templates/packages.yml - name: Create user account - user: + ansible.builtin.user: name: "{{ username }}" password: "{{ userpassword }}" groups: sudo shell: /bin/bash state: present - createhome: yes - when: ansible_os_family == "Debian" + createhome: true + when: ansible_os_family == "Debian" - name: Create user account - user: + ansible.builtin.user: name: "{{ username }}" password: "{{ userpassword }}" shell: /bin/bash groups: wheel state: present - createhome: yes - when: ansible_os_family 
== "RedHat" - + createhome: true + when: ansible_os_family == "RedHat" + - name: Run SSH tasks - include_tasks: + ansible.builtin.include_tasks: file: ssh.yml - name: Copy unattended-upgrades file - copy: + ansible.builtin.copy: src: files/10periodic - dest: /etc/apt/apt.conf.d/10periodic - owner: root - group: root + dest: /etc/apt/apt.conf.d/10periodic + owner: root + group: root mode: "0644" - force: yes - when: ansible_os_family == "Debian" + force: true + when: ansible_os_family == "Debian" - name: Remove undesirable packages - package: + ansible.builtin.package: name: "{{ unnecessary_software }}" state: absent - when: ansible_os_family == "Debian" + when: ansible_os_family == "Debian" - name: Stop and disable unnecessary services - service: + ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no + enabled: false with_items: "{{ unnecessary_services }}" - ignore_errors: yes + ignore_errors: "{{ ansible_check_mode }}" - name: Set a message of the day - copy: + ansible.builtin.copy: dest: /etc/motd src: files/motd owner: root @@ -60,7 +59,7 @@ mode: 0644 - name: Set a login banner - copy: + ansible.builtin.copy: dest: "{{ item }}" src: files/issue owner: root @@ -70,13 +69,14 @@ - /etc/issue - /etc/issue.net -- name: set timezone - shell: timedatectl set-timezone America/Chicago +- name: Set timezone + ansible.builtin.command: timedatectl set-timezone America/Chicago + changed_when: my_output.rc != 0 - name: Enable cockpit - systemd: + ansible.builtin.systemd: name: cockpit - daemon_reload: yes + daemon_reload: true state: restarted - enabled: yes - when: ansible_os_family == "RedHat" + enabled: true + when: ansible_os_family == "RedHat" diff --git a/ansible/roles/base/tasks/ssh.yml b/ansible/roles/base/tasks/ssh.yml index b1cb48c..26f49a4 100644 --- a/ansible/roles/base/tasks/ssh.yml +++ b/ansible/roles/base/tasks/ssh.yml @@ -1,47 +1,50 @@ - name: Deploy SSH Key (administrator) - copy: + ansible.builtin.copy: dest: 
/home/administrator/.ssh/authorized_keys src: files/authorized_keys_administrator - force: true + mode: "0600" + force: true -- name: ensure ssh folder exists for user - file: +- name: Ensure ssh folder exists for user + ansible.builtin.file: path: /home/user/.ssh + mode: "0700" state: directory - name: Deploy SSH Key (user) - copy: + ansible.builtin.copy: dest: /home/user/.ssh/authorized_keys src: files/authorized_keys_user - force: true + mode: "0600" + force: true - name: Remove Root SSH Configuration - file: + ansible.builtin.file: path: /root/.ssh state: absent - name: Copy Secured SSHD Configuration - copy: - src: files/sshd_config_secured - dest: /etc/ssh/sshd_config - owner: root - group: root + ansible.builtin.copy: + src: files/sshd_config_secured + dest: /etc/ssh/sshd_config + owner: root + group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_os_family == "Debian" - name: Copy Secured SSHD Configuration - copy: - src: files/sshd_config_secured_redhat - dest: /etc/ssh/sshd_config - owner: root - group: root - mode: "0644" - when: ansible_os_family == "RedHat" + ansible.builtin.copy: + src: files/sshd_config_secured_redhat + dest: /etc/ssh/sshd_config + owner: root + group: root + mode: "0644" + when: ansible_os_family == "RedHat" - name: Restart SSHD - systemd: + ansible.builtin.systemd: name: sshd - daemon_reload: yes + daemon_reload: true state: restarted - enabled: yes - ignore_errors: yes + enabled: true + ignore_errors: "{{ ansible_check_mode }}" diff --git a/ansible/roles/base/vars/main.yml b/ansible/roles/base/vars/main.yml index 539bb50..b6da8db 100644 --- a/ansible/roles/base/vars/main.yml +++ b/ansible/roles/base/vars/main.yml @@ -10,16 +10,14 @@ required_packages: redhat_required_packages: - qemu-guest-agent - - cockpit - - iscsi-initiator-utils + - cockpit + - iscsi-initiator-utils unnecessary_services: - postfix - telnet - + unnecessary_software: - tcpdump - nmap-ncat - - wpa_supplicant - - + - wpa_supplicant diff 
--git a/ansible/roles/cloudflare/tasks/main.yml b/ansible/roles/cloudflare/tasks/main.yml index 5a5a5ee..492846b 100644 --- a/ansible/roles/cloudflare/tasks/main.yml +++ b/ansible/roles/cloudflare/tasks/main.yml @@ -6,6 +6,6 @@ state: "{{ item.state }}" type: A proxied: "{{ item.proxied }}" - value: "{{ lookup('env','external_ip') }}" - api_token: "{{ lookup('env','cloudflareapi') }}" - with_items: "{{ DNS }}" + value: "{{ lookup('env', 'external_ip') }}" + api_token: "{{ lookup('env', 'cloudflareapi') }}" + with_items: "{{ dns }}" diff --git a/ansible/roles/k3s/argocd/tasks/main.yml b/ansible/roles/k3s/argocd/tasks/main.yml index 76d2c57..236a959 100644 --- a/ansible/roles/k3s/argocd/tasks/main.yml +++ b/ansible/roles/k3s/argocd/tasks/main.yml @@ -1,29 +1,29 @@ ---- -- name: copy configs - copy: +- name: Copy configs + ansible.builtin.copy: src: ./roles/k3s/argocd/templates/ dest: /opt/argocd - owner: administrator - group: administrator + owner: administrator + group: administrator mode: "0664" - force: yes + force: true -- name: copy configs - copy: - src: "{{ lookup('env','kubeseal') }}" +- name: Copy configs + ansible.builtin.copy: + src: "{{ lookup('env', 'kubeseal') }}" dest: /opt/kubeseal.yaml - owner: administrator - group: administrator + owner: administrator + group: administrator mode: "0600" - force: yes + force: true - name: Apply Kubeseal master key - command: k3s kubectl apply -f /opt/kubeseal.yaml --force + ansible.builtin.command: k3s kubectl apply -f /opt/kubeseal.yaml --force + changed_when: true - name: Apply ArgoCD - command: k3s kubectl apply -f /opt/argocd/argocd.yaml -n argocd + ansible.builtin.command: k3s kubectl apply -f /opt/argocd/argocd.yaml -n argocd + changed_when: true - name: Apply ArgoCD Apps - command: k3s kubectl apply -f /opt/argocd/apps.yaml -n argocd - - + ansible.builtin.command: k3s kubectl apply -f /opt/argocd/apps.yaml -n argocd + changed_when: true diff --git 
a/ansible/roles/k3s/download/tasks/main.yml b/ansible/roles/k3s/download/tasks/main.yml index 543009e..19cf8ef 100644 --- a/ansible/roles/k3s/download/tasks/main.yml +++ b/ansible/roles/k3s/download/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Download k3s binary x64 - get_url: + ansible.builtin.get_url: url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt dest: /usr/local/bin/k3s diff --git a/ansible/roles/k3s/master/tasks/main.yml b/ansible/roles/k3s/master/tasks/main.yml index 528db63..9a2c1c8 100644 --- a/ansible/roles/k3s/master/tasks/main.yml +++ b/ansible/roles/k3s/master/tasks/main.yml @@ -1,6 +1,5 @@ ---- - name: Create manifests directory on first master - file: + ansible.builtin.file: path: /var/lib/rancher/k3s/server/manifests state: directory owner: root @@ -9,7 +8,7 @@ when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - name: Copy vip rbac manifest to first master - template: + ansible.builtin.template: src: "vip.rbac.yaml.j2" dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" owner: root @@ -18,7 +17,7 @@ when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - name: Copy vip manifest to first master - template: + ansible.builtin.template: src: "vip.yaml.j2" dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml" owner: root @@ -28,7 +27,7 @@ # these will be copied and installed now, then tested later and apply config - name: Copy metallb namespace to first master - template: + ansible.builtin.template: src: "metallb.namespace.j2" dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml" owner: root @@ -37,7 +36,7 @@ when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - name: Copy metallb namespace to first master - template: + ansible.builtin.template: src: "metallb.crds.j2" dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml" 
owner: root @@ -46,7 +45,7 @@ when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - name: Init cluster inside the transient k3s-init service - command: + ansible.builtin.command: cmd: "systemd-run -p RestartSec=2 \ -p Restart=on-failure \ --unit=k3s-init \ @@ -56,9 +55,10 @@ warn: false # The ansible systemd module does not support transient units - name: Verification + when: not ansible_check_mode block: - name: Verify that all nodes actually joined (check k3s-init.service if this fails) - command: + ansible.builtin.command: cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}" register: nodes until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length) @@ -67,21 +67,20 @@ changed_when: false always: - name: Save logs of k3s-init.service - include_tasks: fetch_k3s_init_logs.yml + ansible.builtin.include_tasks: fetch_k3s_init_logs.yml when: log_destination vars: log_destination: >- {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }} - name: Kill the temporary service used for initialization - systemd: + ansible.builtin.systemd: name: k3s-init state: stopped failed_when: false - when: not ansible_check_mode - name: Copy K3s service file register: k3s_service - template: + ansible.builtin.template: src: "k3s.service.j2" dest: "{{ systemd_dir }}/k3s.service" owner: root @@ -89,55 +88,55 @@ mode: 0644 - name: Enable and check K3s service - systemd: + ansible.builtin.systemd: name: k3s - daemon_reload: yes + daemon_reload: true state: restarted - enabled: yes + enabled: true - name: Wait for node-token - wait_for: + ansible.builtin.wait_for: path: /var/lib/rancher/k3s/server/node-token - name: Register node-token file access mode - stat: + ansible.builtin.stat: path: /var/lib/rancher/k3s/server register: p - name: Change file access node-token - file: + ansible.builtin.file: path: /var/lib/rancher/k3s/server mode: "g+rx,o+rx" - name: Read 
node-token from master - slurp: + ansible.builtin.slurp: src: /var/lib/rancher/k3s/server/node-token register: node_token - name: Store Master node-token - set_fact: + ansible.builtin.set_fact: token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" - name: Restore node-token file access - file: + ansible.builtin.file: path: /var/lib/rancher/k3s/server mode: "{{ p.stat.mode }}" - name: Create directory .kube - file: + ansible.builtin.file: path: ~{{ item }}/.kube state: directory owner: "{{ item }}" mode: "u=rwx,g=rx,o=" loop: - "{{ ansible_user }}" - - "{{ username }}" + - "{{ username }}" - name: Copy config file to user home directory - copy: + ansible.builtin.copy: src: /etc/rancher/k3s/k3s.yaml dest: ~{{ item }}/.kube/config - remote_src: yes + remote_src: true owner: "{{ item }}" mode: "u=rw,g=,o=" loop: @@ -145,48 +144,44 @@ - "{{ username }}" - name: Configure kubectl cluster to {{ endpoint_url }} - command: >- + ansible.builtin.command: >- k3s kubectl config set-cluster default --server={{ endpoint_url }} --kubeconfig ~{{ item }}/.kube/config changed_when: true loop: - "{{ ansible_user }}" - - "{{ username }}" + - "{{ username }}" vars: endpoint_url: >- - https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 - # Deactivated linter rules: - # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap - # would be undefined. This will not be the case during playbook execution. 
- # noqa jinja[invalid] + https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 - name: Create kubectl symlink - file: + ansible.builtin.file: src: /usr/local/bin/k3s dest: /usr/local/bin/kubectl state: link - name: Create crictl symlink - file: + ansible.builtin.file: src: /usr/local/bin/k3s dest: /usr/local/bin/crictl state: link - name: Get contents of manifests folder - find: + ansible.builtin.find: paths: /var/lib/rancher/k3s/server/manifests file_type: file register: k3s_server_manifests - name: Get sub dirs of manifests folder - find: + ansible.builtin.find: paths: /var/lib/rancher/k3s/server/manifests file_type: directory register: k3s_server_manifests_directories - name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start - file: + ansible.builtin.file: path: "{{ item.path }}" state: absent with_items: @@ -194,4 +189,3 @@ - "{{ k3s_server_manifests.files }}" - "{{ k3s_server_manifests_directories.files }}" loop_control: label: "{{ item.path }}" - diff --git a/ansible/roles/k3s/node/tasks/main.yml b/ansible/roles/k3s/node/tasks/main.yml index 0ce8e08..0fc7793 100644 --- a/ansible/roles/k3s/node/tasks/main.yml +++ b/ansible/roles/k3s/node/tasks/main.yml @@ -1,7 +1,5 @@ ---- - - name: Copy K3s service file - template: + ansible.builtin.template: src: "k3s.service.j2" dest: "{{ systemd_dir }}/k3s-node.service" owner: root @@ -9,8 +7,8 @@ mode: 0755 - name: Enable and check K3s service - systemd: + ansible.builtin.systemd: name: k3s-node - daemon_reload: yes + daemon_reload: true state: restarted - enabled: yes + enabled: true diff --git a/ansible/roles/k3s/post/tasks/main.yml b/ansible/roles/k3s/post/tasks/main.yml index a838885..204dd7a 100644 --- a/ansible/roles/k3s/post/tasks/main.yml +++ b/ansible/roles/k3s/post/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Create manifests directory for temp configuration - file: + ansible.builtin.file: path: /tmp/k3s state: directory owner: "{{ ansible_user }}" @@ -9,7 +9,7 @@ run_once: true - 
name: Copy metallb CRs manifest to first master - template: + ansible.builtin.template: src: "metallb.crs.j2" dest: "/tmp/k3s/metallb-crs.yaml" owner: "{{ ansible_user }}" @@ -18,14 +18,14 @@ run_once: true - name: Test metallb-system namespace - command: >- + ansible.builtin.command: >- k3s kubectl -n metallb-system changed_when: false with_items: "{{ groups['master'] }}" run_once: true - name: Wait for MetalLB resources - command: >- + ansible.builtin.command: >- k3s kubectl wait {{ item.resource }} --namespace='metallb-system' {% if item.name | default(False) -%}{{ item.name }}{%- endif %} @@ -63,14 +63,14 @@ label: "{{ item.description }}" - name: Test metallb-system webhook-service endpoint - command: >- + ansible.builtin.command: >- k3s kubectl -n metallb-system get endpoints webhook-service changed_when: false with_items: "{{ groups['master'] }}" run_once: true - name: Apply metallb CRs - command: >- + ansible.builtin.command: >- k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml --timeout='{{ metal_lb_available_timeout }}' register: this @@ -80,7 +80,7 @@ retries: 5 - name: Test metallb-system resources - command: >- + ansible.builtin.command: >- k3s kubectl -n metallb-system get {{ item }} changed_when: false run_once: true @@ -89,6 +89,6 @@ - L2Advertisement - name: Remove tmp directory used for manifests - file: + ansible.builtin.file: path: /tmp/k3s state: absent diff --git a/ansible/roles/k3s/prereq/tasks/main.yml b/ansible/roles/k3s/prereq/tasks/main.yml index dcab613..713c47e 100644 --- a/ansible/roles/k3s/prereq/tasks/main.yml +++ b/ansible/roles/k3s/prereq/tasks/main.yml @@ -1,61 +1,60 @@ ---- - name: Set same timezone on every Server - timezone: + community.general.timezone: name: "{{ system_timezone }}" when: (system_timezone is defined) and (system_timezone != "Your/Timezone") - name: Set SELinux to disabled state - selinux: + ansible.posix.selinux: state: disabled when: ansible_os_family == "RedHat" - name: Enable IPv4 forwarding - sysctl: + 
ansible.posix.sysctl: name: net.ipv4.ip_forward value: "1" state: present - reload: yes + reload: true - name: Enable IPv6 forwarding - sysctl: + ansible.posix.sysctl: name: net.ipv6.conf.all.forwarding value: "1" state: present - reload: yes + reload: true - name: Enable IPv6 router advertisements - sysctl: + ansible.posix.sysctl: name: net.ipv6.conf.all.accept_ra value: "2" state: present - reload: yes + reload: true - name: Add br_netfilter to /etc/modules-load.d/ - copy: + ansible.builtin.copy: content: "br_netfilter" dest: /etc/modules-load.d/br_netfilter.conf mode: "u=rw,g=,o=" when: ansible_os_family == "RedHat" - name: Load br_netfilter - modprobe: + community.general.modprobe: name: br_netfilter state: present when: ansible_os_family == "RedHat" - name: Set bridge-nf-call-iptables (just to be sure) - sysctl: + ansible.posix.sysctl: name: "{{ item }}" value: "1" state: present - reload: yes + reload: true when: ansible_os_family == "RedHat" loop: - net.bridge.bridge-nf-call-iptables - net.bridge.bridge-nf-call-ip6tables - name: Add /usr/local/bin to sudo secure_path - lineinfile: + ansible.builtin.lineinfile: line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin' regexp: "Defaults(\\s)*secure_path(\\s)*=" state: present diff --git a/ansible/roles/update/tasks/main.yml b/ansible/roles/update/tasks/main.yml index 6594b8f..d57847a 100644 --- a/ansible/roles/update/tasks/main.yml +++ b/ansible/roles/update/tasks/main.yml @@ -1,46 +1,49 @@ -- name: check packages for updates - shell: yum list updates | awk 'f;/Updated Packages/{f=1;}' | awk '{ print $1 }' +- name: Check packages for updates + ansible.builtin.shell: set -o pipefail && yum list updates | awk 'f;/Updated Packages/{f=1;}' | awk '{ print $1 }' changed_when: updates.stdout_lines | length > 0 args: warn: false register: updates -- name: display count - debug: +- name: Display count + ansible.builtin.debug: msg: "Found {{ updates.stdout_lines | length }} packages to be 
updated:\n\n{{ updates.stdout }}" -- when: updates.stdout_lines | length > 0 +- name: Update if needed + when: updates.stdout_lines | length > 0 block: - - - name: install updates using yum - yum: - name: "*" - state: latest - - name: install yum-utils - package: + - name: Install updates using yum + ansible.builtin.yum: + name: "*" + state: present + update_only: true + + - name: Install yum-utils + ansible.builtin.package: name: yum-utils - - name: check if reboot is required - shell: needs-restarting -r + - name: Check if reboot is required + ansible.builtin.command: needs-restarting -r failed_when: false register: reboot_required changed_when: false -- when: updates.stdout_lines | length > 0 and reboot_required.rc != 0 +- name: Reboot if required + when: updates.stdout_lines | length > 0 and reboot_required.rc != 0 block: - - name: reboot the server if required - shell: sleep 3; reboot - ignore_errors: true + - name: Reboot the server if required + ansible.builtin.shell: sleep 3; reboot + ignore_errors: "{{ ansible_check_mode }}" changed_when: false async: 1 poll: 0 - - name: wait for server to come back after reboot - wait_for_connection: + - name: Wait for server to come back after reboot + ansible.builtin.wait_for_connection: timeout: 600 delay: 20 register: reboot_result - - name: reboot time - debug: + - name: Reboot time + ansible.builtin.debug: msg: "The system rebooted in {{ reboot_result.elapsed }} seconds." diff --git a/ansible/update.yml b/ansible/update.yml index 05a919a..63fdda8 100644 --- a/ansible/update.yml +++ b/ansible/update.yml @@ -1,5 +1,5 @@ - hosts: all - gather_facts: yes - become: yes + gather_facts: true + become: true roles: - update