Update ansible/group_vars/all.yml, ansible/roles/base/files/10periodic, ansible/roles/base/files/authorized_keys_administrator, ansible/roles/base/files/authorized_keys_user, ansible/roles/base/files/issue, ansible/roles/base/files/motd, ansible/roles/base/files/sshd_config_secured, ansible/roles/base/files/sshd_config_secured_redhat, ansible/roles/base/tasks/main.yml, ansible/roles/base/tasks/ssh.yml, ansible/roles/base/vars/main.yml, ansible/roles/cloudflare/tasks/main.yml, ansible/roles/k3s/argocd/tasks/main.yml, ansible/roles/k3s/argocd/templates/apps.yaml, ansible/roles/k3s/argocd/templates/argocd.yaml, ansible/roles/k3s/download/tasks/main.yml, ansible/roles/k3s/master/defaults/main.yml, ansible/roles/k3s/master/tasks/main.yml, ansible/roles/k3s/master/templates/content.j2, ansible/roles/k3s/master/templates/k3s.service.j2, ansible/roles/k3s/master/templates/metallb.configmap.j2, ansible/roles/k3s/master/templates/metallb.crds.j2, ansible/roles/k3s/master/templates/metallb.namespace.j2, ansible/roles/k3s/master/templates/vip.rbac.yaml.j2, ansible/roles/k3s/master/templates/vip.yaml.j2, ansible/roles/k3s/node/tasks/main.yml, ansible/roles/k3s/node/templates/k3s.service.j2, ansible/roles/k3s/post/defaults/main.yml, ansible/roles/k3s/post/tasks/main.yml, ansible/roles/k3s/post/templates/metallb.crs.j2, ansible/roles/k3s/prereq/tasks/main.yml, ansible/roles/update/tasks/main.yml, ansible/templates/packages.yml, ansible/ansible.cfg
ansible/roles/base/files/10periodic (new file)
@@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::AutocleanInterval "7";
APT::Periodic::Unattended-Upgrade "1";
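A quick manual spot-check on a deployed Debian host (not part of the role) is to confirm APT picked the settings up:

apt-config dump | grep Periodic   # should echo the four values above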
ansible/roles/base/files/authorized_keys_administrator (new file)
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDTYqag8OKcV6kIitn3Axlyi3Xr9EeybG10wlglw34fYF0pY+OERy7zZKEju4ijZzQ7eWNlcXLYSorm5Tngkvnz4vbM4b9R7gZjTV9drSGDo0BLkMXNuSTrKwGeokcNkxh+HZcWSK4/SE5zPzvkPj1UvmAgQ4P4N79mqPe5/9gAvdrlUWEtuqVdEHc/FMk4kEZsRu4lg58KoghNCRYMYHOyd1rbHsuWpX5NumPxnosWG22jzqj46rUWEXvA7MrCGGbUDlk5+/h7Bvw4O8nGZLEo/qyaYvChTBj/UqYYBssC4VlW/SNJB1yfrklqdtcknmFVJBi174cQtzZDXOerwneh8/+t7wWpcxkWscxYrwdJspzAU/NGk02xDPaG4F1mdgZ6HIZCQAaw/EbaNbiuU+bhdngEIHUvVmdiy4T09FWIWuJxO6FnAiVIU5K8LpqGLTFp7kjOwAczdQ+KVojm/1A5W/ZoTE/y3Ni1fVaOJFCxSgU7qiKAm7hb2ZXvznNgryc= ansible
ansible/roles/base/files/authorized_keys_user (new file)
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCiUz4GQntmn/btPuGEnk0YWsbpCFqQTh2fEBmUb1UMONhoCc2lhgMaJwDSv9lLX26YCt636A1w04ANsOZycr3ZFGUoEqMU7DT+0A89IKe8kpNT1oIYz6rQQixzY2oNDsWYJUiovEgipccHrj9ry8Ke3/8BtznvVxtmVi0c3gzQiFY76xxeie5A/sXcC3N2YsD0HC1zFYBbj6LG6w4fBVYyYTZy2MjBu+r42GF0YqpDFXykUnG7yrq2j6Vx2LoVMotYNCXBJb6cfb/hN5gnpZYnD0S1Z1m6IfX7snGtHp5uU0UbOFnjVYUCe6h0XBQa2K4KpFiZac69GT/vjyi5sEUfmMyQXH5vrLOZcWdH+abRUk+B4mFmSYxf8514CuhfHX7y7BkCaN41CQr4dbfNLeaQ1jl9RzKieVJb0VYoHYQHZD8AwKKNXM9/DjLK6TIu1q2bN5pE8cweqDUrJSIFXla9ykEdGMXYWFWtUdURpqy0/QTzD8It/W/tauONuwjPu90=
ansible/roles/base/files/issue (new file)
@@ -0,0 +1,3 @@
Use of this system is restricted to authorized users only, and all use is subject to an acceptable use policy.

IF YOU ARE NOT AUTHORIZED TO USE THIS SYSTEM, DISCONNECT NOW.
ansible/roles/base/files/motd (new file)
@@ -0,0 +1,3 @@
THIS SYSTEM IS FOR AUTHORIZED USE ONLY

All activities are logged and monitored.
ansible/roles/base/files/sshd_config_secured (new file)
@@ -0,0 +1,94 @@
# Package generated configuration file
# See the sshd_config(5) manpage for details

# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes

# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 1024

# Logging
SyslogFacility AUTH
LogLevel INFO

# Authentication:
LoginGraceTime 120
PermitRootLogin no
StrictModes yes

RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile	%h/.ssh/authorized_keys

# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes

# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no

# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no

# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no

# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes

# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes

X11Forwarding no
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no

#MaxStartups 10:30:60
#Banner /etc/issue.net

# Allow client to pass locale environment variables
AcceptEnv LANG LC_*

Subsystem sftp /usr/lib/openssh/sftp-server

# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication.  Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes

ClientAliveInterval 300

#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo
ansible/roles/base/files/sshd_config_secured_redhat (new file)
@@ -0,0 +1,135 @@
# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $

# This is the sshd server system-wide configuration file.  See
# sshd_config(5) for more information.

# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin

# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented.  Uncommented options override the
# default value.

# To modify the system-wide sshd configuration, create a *.conf file under
# /etc/ssh/sshd_config.d/ which will be automatically included below
Include /etc/ssh/sshd_config.d/*.conf

# If you want to change the port on a SELinux system, you have to tell
# SELinux about this change.
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
#
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::

#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key

# Ciphers and keying
#RekeyLimit default none

# Logging
#SyslogFacility AUTH
#LogLevel INFO

# Authentication:

#LoginGraceTime 2m
PermitRootLogin no
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10

PubkeyAuthentication yes

# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile	.ssh/authorized_keys

#AuthorizedPrincipalsFile none

#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody

# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes

# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
PermitEmptyPasswords no

# Change to no to disable s/key passwords
#KbdInteractiveAuthentication yes

# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
#KerberosUseKuserok yes

# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
#GSSAPIEnablek5users no

# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication.  Depending on your PAM configuration,
# PAM authentication via KbdInteractiveAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and KbdInteractiveAuthentication to 'no'.
# WARNING: 'UsePAM no' is not supported in Fedora and may cause several
# problems.
#UsePAM no

#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
#X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
#PrintMotd yes
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
ClientAliveInterval 300
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none

# no default banner path
#Banner none

# override default of no subsystems
Subsystem	sftp	/usr/libexec/openssh/sftp-server

# Example of overriding settings on a per-user basis
#Match User anoncvs
#	X11Forwarding no
#	AllowTcpForwarding no
#	PermitTTY no
#	ForceCommand cvs server
PasswordAuthentication no

#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo
ansible/roles/base/tasks/main.yml (new file)
@@ -0,0 +1,82 @@
---
- name: Run Package tasks
  include_tasks:
    file: ./templates/packages.yml

- name: Create user account
  user:
    name: "{{ username }}"
    password: "{{ userpassword }}"
    groups: sudo
    shell: /bin/bash
    state: present
    createhome: yes
  when: ansible_os_family == "Debian"

- name: Create user account
  user:
    name: "{{ username }}"
    password: "{{ userpassword }}"
    shell: /bin/bash
    groups: wheel
    state: present
    createhome: yes
  when: ansible_os_family == "RedHat"

- name: Run SSH tasks
  include_tasks:
    file: ssh.yml

- name: Copy unattended-upgrades file
  copy:
    src: files/10periodic
    dest: /etc/apt/apt.conf.d/10periodic
    owner: root
    group: root
    mode: "0644"
    force: yes
  when: ansible_os_family == "Debian"

- name: Remove undesirable packages
  package:
    name: "{{ unnecessary_software }}"
    state: absent
  when: ansible_os_family == "Debian"

- name: Stop and disable unnecessary services
  service:
    name: "{{ item }}"
    state: stopped
    enabled: no
  with_items: "{{ unnecessary_services }}"
  ignore_errors: yes

- name: Set a message of the day
  copy:
    dest: /etc/motd
    src: files/motd
    owner: root
    group: root
    mode: 0644

- name: Set a login banner
  copy:
    dest: "{{ item }}"
    src: files/issue
    owner: root
    group: root
    mode: 0644
  with_items:
    - /etc/issue
    - /etc/issue.net

- name: set timezone
  shell: timedatectl set-timezone America/Chicago

- name: Enable cockpit
  systemd:
    name: cockpit
    daemon_reload: yes
    state: restarted
    enabled: yes
  when: ansible_os_family == "RedHat"
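The user tasks above expect `username` and `userpassword` to be defined elsewhere (group_vars/all.yml is in the commit but not shown in this diff). Note that the user module's password field takes a crypted hash, not plaintext; a minimal sketch of what such definitions could look like, with placeholder values:

# hypothetical group_vars/all.yml entries (names assumed from the tasks above)
username: administrator
userpassword: "{{ 'changeme' | password_hash('sha512') }}"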
ansible/roles/base/tasks/ssh.yml (new file)
@@ -0,0 +1,47 @@
- name: Deploy SSH Key (administrator)
  copy:
    dest: /home/administrator/.ssh/authorized_keys
    src: files/authorized_keys_administrator
    force: true

- name: ensure ssh folder exists for user
  file:
    path: /home/user/.ssh
    state: directory

- name: Deploy SSH Key (user)
  copy:
    dest: /home/user/.ssh/authorized_keys
    src: files/authorized_keys_user
    force: true

- name: Remove Root SSH Configuration
  file:
    path: /root/.ssh
    state: absent

- name: Copy Secured SSHD Configuration
  copy:
    src: files/sshd_config_secured
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: "0644"
  when: ansible_os_family == "Debian"

- name: Copy Secured SSHD Configuration
  copy:
    src: files/sshd_config_secured_redhat
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: "0644"
  when: ansible_os_family == "RedHat"

- name: Restart SSHD
  systemd:
    name: sshd
    daemon_reload: yes
    state: restarted
    enabled: yes
  ignore_errors: yes
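A broken sshd_config pushed here would lock out remote access on the restart that follows; the copy module's validate parameter can guard against that. A minimal sketch of the hardening this role could add (not part of the commit):

- name: Copy Secured SSHD Configuration
  copy:
    src: files/sshd_config_secured
    dest: /etc/ssh/sshd_config
    validate: /usr/sbin/sshd -t -f %s   # refuse to install a config sshd cannot parse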
ansible/roles/base/vars/main.yml (new file)
@@ -0,0 +1,25 @@
---
required_packages:
  - ufw
  - qemu-guest-agent
  - fail2ban
  - unattended-upgrades
  - cockpit
  - nfs-common
  - open-iscsi

redhat_required_packages:
  - qemu-guest-agent
  - cockpit
  - iscsi-initiator-utils

unnecessary_services:
  - postfix
  - telnet

unnecessary_software:
  - tcpdump
  - nmap-ncat
  - wpa_supplicant
ansible/roles/cloudflare/tasks/main.yml (new file)
@@ -0,0 +1,11 @@
---
- name: Update Cloudflare
  community.general.cloudflare_dns:
    zone: "{{ item.zone }}"
    record: "{{ item.record }}"
    state: "{{ item.state }}"
    type: A
    proxied: "{{ item.proxied }}"
    value: "{{ lookup('env','external_ip') }}"
    api_token: "{{ lookup('env','cloudflareapi') }}"
  with_items: "{{ DNS }}"
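The loop expects a `DNS` list defined outside this diff (presumably in group_vars/all.yml); each entry needs the four keys the task references. A hypothetical example of the expected shape:

DNS:
  - zone: example.com    # placeholder zone
    record: www
    state: present
    proxied: true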
ansible/roles/k3s/argocd/tasks/main.yml (new file)
@@ -0,0 +1,29 @@
---
- name: copy configs
  copy:
    src: ./roles/k3s/argocd/templates/
    dest: /opt/argocd
    owner: administrator
    group: administrator
    mode: "0664"
    force: yes

- name: copy kubeseal master key
  copy:
    src: "{{ lookup('env','kubeseal') }}"
    dest: /opt/kubeseal.yaml
    owner: administrator
    group: administrator
    mode: "0600"
    force: yes

- name: Apply Kubeseal master key
  command: k3s kubectl apply -f /opt/kubeseal.yaml --force

- name: Apply ArgoCD
  command: k3s kubectl apply -f /opt/argocd/argocd.yaml -n argocd

- name: Apply ArgoCD Apps
  command: k3s kubectl apply -f /opt/argocd/apps.yaml -n argocd
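Like the cloudflare role, this role reads an environment variable from the control node: `kubeseal` must point at a local backup of the sealed-secrets signing key so a rebuilt cluster can decrypt existing SealedSecrets. A hypothetical invocation:

export kubeseal=$HOME/backups/sealed-secrets-key.yaml   # placeholder path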
ansible/roles/k3s/argocd/templates/apps.yaml (new file)
@@ -0,0 +1,22 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argocd
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/DeveloperDurp/homelab.git
    targetRevision: main
    path: argocd
    directory:
      recurse: true
  destination:
    namespace: argocd
    name: in-cluster
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
ansible/roles/k3s/argocd/templates/argocd.yaml (new file, 10784 lines)
File diff suppressed because it is too large
ansible/roles/k3s/download/tasks/main.yml (new file)
@@ -0,0 +1,10 @@
---
- name: Download k3s binary x64
  get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: 0755
  when: ansible_facts.architecture == "x86_64"
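Both the download URL and the checksum file interpolate `k3s_version`, which is defined outside this diff; it has to match a published tag on the k3s releases page. A hypothetical pin:

k3s_version: v1.25.4+k3s1   # placeholder; any published release tag works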
ansible/roles/k3s/master/defaults/main.yml (new file)
@@ -0,0 +1,12 @@
---
ansible_user: root
server_init_args: >-
  {% if groups['master'] | length > 1 %}
    {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
      --cluster-init
    {% else %}
      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
    {% endif %}
    --token {{ k3s_token }}
  {% endif %}
  {{ extra_server_args | default('') }}
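Rendered for the first of several masters, the folded expression collapses to a single argument string along the lines of (token elided):

k3s server --cluster-init --token <k3s_token>

while every other master instead gets --server https://<first-master-ip>:6443 --token <k3s_token>, so only the first node bootstraps the embedded etcd cluster and the rest join it.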
ansible/roles/k3s/master/tasks/main.yml (new file)
@@ -0,0 +1,197 @@
---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip rbac manifest to first master
  template:
    src: "vip.rbac.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  template:
    src: "vip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

# these will be copied and installed now, then tested later and apply config
- name: Copy metallb namespace to first master
  template:
    src: "metallb.namespace.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy metallb CRDs to first master
  template:
    src: "metallb.crds.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Init cluster inside the transient k3s-init service
  command:
    cmd: "systemd-run -p RestartSec=2 \
         -p Restart=on-failure \
         --unit=k3s-init \
         k3s server {{ server_init_args }}"
    creates: "{{ systemd_dir }}/k3s.service"
  args:
    warn: false  # The ansible systemd module does not support transient units

- name: Verification
  block:
    - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
      command:
        cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
      register: nodes
      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
      retries: "{{ retry_count | default(20) }}"
      delay: 10
      changed_when: false
  always:
    - name: Save logs of k3s-init.service
      include_tasks: fetch_k3s_init_logs.yml
      when: log_destination
      vars:
        log_destination: >-
          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
    - name: Kill the temporary service used for initialization
      systemd:
        name: k3s-init
        state: stopped
      failed_when: false
      when: not ansible_check_mode

- name: Copy K3s service file
  register: k3s_service
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: 0644

- name: Enable and check K3s service
  systemd:
    name: k3s
    daemon_reload: yes
    state: restarted
    enabled: yes

- name: Wait for node-token
  wait_for:
    path: /var/lib/rancher/k3s/server/node-token

- name: Register node-token file access mode
  stat:
    path: /var/lib/rancher/k3s/server
  register: p

- name: Change file access node-token
  file:
    path: /var/lib/rancher/k3s/server
    mode: "g+rx,o+rx"

- name: Read node-token from master
  slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: node_token

- name: Store Master node-token
  set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  file:
    path: /var/lib/rancher/k3s/server
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  file:
    path: ~{{ item }}/.kube
    state: directory
    owner: "{{ item }}"
    mode: "u=rwx,g=rx,o="
  loop:
    - "{{ ansible_user }}"
    - "{{ username }}"

- name: Copy config file to user home directory
  copy:
    src: /etc/rancher/k3s/k3s.yaml
    dest: ~{{ item }}/.kube/config
    remote_src: yes
    owner: "{{ item }}"
    mode: "u=rw,g=,o="
  loop:
    - "{{ ansible_user }}"
    - "{{ username }}"

- name: Configure kubectl cluster to {{ endpoint_url }}
  command: >-
    k3s kubectl config set-cluster default
      --server={{ endpoint_url }}
      --kubeconfig ~{{ item }}/.kube/config
  changed_when: true
  loop:
    - "{{ ansible_user }}"
    - "{{ username }}"
  vars:
    endpoint_url: >-
      https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
# Deactivated linter rules:
#   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
#     would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]

- name: Create kubectl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link

- name: Create crictl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link

- name: Get contents of manifests folder
  find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: file
  register: k3s_server_manifests

- name: Get sub dirs of manifests folder
  find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: directory
  register: k3s_server_manifests_directories

- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
  file:
    path: "{{ item.path }}"
    state: absent
  with_items:
    - "{{ k3s_server_manifests.files }}"
    - "{{ k3s_server_manifests_directories.files }}"
  loop_control:
    label: "{{ item.path }}"
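Once this role finishes, the kubeconfig written for both accounts should reach the API server through the kube-vip address rather than a single node. A quick manual check (not part of the role):

kubectl get nodes -o wide   # should list every master, served via https://<apiserver_endpoint>:6443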
ansible/roles/k3s/master/templates/content.j2 (new file)
@@ -0,0 +1,5 @@
{#
  This is a really simple template that just outputs the
  value of the "content" variable.
#}
{{ content }}
ansible/roles/k3s/master/templates/k3s.service.j2 (new file)
@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
ansible/roles/k3s/master/templates/metallb.configmap.j2 (new file)
@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
      - name: default
        protocol: layer2
        addresses:
          - {{ metal_lb_ip_range }}
ansible/roles/k3s/master/templates/metallb.crds.j2 (new file, 1797 lines)
File diff suppressed because it is too large
ansible/roles/k3s/master/templates/metallb.namespace.j2 (new file)
@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
ansible/roles/k3s/master/templates/vip.rbac.yaml.j2 (new file)
@@ -0,0 +1,32 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-vip
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  name: system:kube-vip-role
rules:
  - apiGroups: [""]
    resources: ["services", "services/status", "nodes", "endpoints"]
    verbs: ["list", "get", "watch", "update"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-vip-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-vip-role
subjects:
  - kind: ServiceAccount
    name: kube-vip
    namespace: kube-system
ansible/roles/k3s/master/templates/vip.yaml.j2 (new file)
@@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-vip-ds
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: kube-vip-ds
  template:
    metadata:
      labels:
        name: kube-vip-ds
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: Exists
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      containers:
        - args:
            - manager
          env:
            - name: vip_arp
              value: "true"
            - name: port
              value: "6443"
            - name: vip_interface
              value: {{ flannel_iface }}
            - name: vip_cidr
              value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
            - name: cp_enable
              value: "true"
            - name: cp_namespace
              value: kube-system
            - name: vip_ddns
              value: "false"
            - name: svc_enable
              value: "false"
            - name: vip_leaderelection
              value: "true"
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            - name: address
              value: {{ apiserver_endpoint }}
          image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
          imagePullPolicy: Always
          name: kube-vip
          resources: {}
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
                - SYS_TIME
      hostNetwork: true
      serviceAccountName: kube-vip
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
  updateStrategy: {}
status:
  currentNumberScheduled: 0
  desiredNumberScheduled: 0
  numberMisscheduled: 0
  numberReady: 0
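The kube-vip and MetalLB templates reference several inventory variables this diff never defines. Hypothetical values showing the expected shapes (names taken from the templates, values are placeholders):

# assumed group_vars entries
apiserver_endpoint: 192.168.1.100          # floating VIP for the control plane
flannel_iface: eth0                        # interface kube-vip binds the VIP to
kube_vip_tag_version: v0.5.7               # placeholder image tag
metal_lb_ip_range: 192.168.1.200-192.168.1.220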
ansible/roles/k3s/node/tasks/main.yml (new file)
@@ -0,0 +1,16 @@
---

- name: Copy K3s service file
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s-node.service"
    owner: root
    group: root
    mode: 0755

- name: Enable and check K3s service
  systemd:
    name: k3s-node
    daemon_reload: yes
    state: restarted
    enabled: yes
ansible/roles/k3s/node/templates/k3s.service.j2 (new file)
@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
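With the hypothetical values sketched earlier, the agent unit's ExecStart renders to a single line such as:

/usr/local/bin/k3s agent --server https://192.168.1.100:6443 --token <node-token>

where the token is the fact slurped from the first master when the master role ran earlier in the play, falling back to `k3s_token` otherwise.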
ansible/roles/k3s/post/defaults/main.yml (new file)
@@ -0,0 +1,3 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
ansible/roles/k3s/post/tasks/main.yml (new file)
@@ -0,0 +1,94 @@
---
- name: Create manifests directory for temp configuration
  file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Copy metallb CRs manifest to first master
  template:
    src: "metallb.crs.j2"
    dest: "/tmp/k3s/metallb-crs.yaml"
    owner: "{{ ansible_user }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Test metallb-system namespace
  command: >-
    k3s kubectl get namespace metallb-system
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Wait for MetalLB resources
  command: >-
    k3s kubectl wait {{ item.resource }}
      --namespace='metallb-system'
      {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
      {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
      {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
      --timeout='{{ metal_lb_available_timeout }}'
  changed_when: false
  run_once: true
  with_items:
    - description: controller
      resource: deployment
      name: controller
      condition: --for condition=Available=True
    - description: webhook service
      resource: pod
      selector: component=controller
      condition: --for=jsonpath='{.status.phase}'=Running
    - description: pods in replica sets
      resource: pod
      selector: component=controller,app=metallb
      condition: --for condition=Ready
    - description: ready replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.readyReplicas}'=1
    - description: fully labeled replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
    - description: available replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.availableReplicas}'=1
  loop_control:
    label: "{{ item.description }}"

- name: Test metallb-system webhook-service endpoint
  command: >-
    k3s kubectl -n metallb-system get endpoints webhook-service
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Apply metallb CRs
  command: >-
    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
      --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
  run_once: true
  until: this.rc == 0
  retries: 5

- name: Test metallb-system resources
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  with_items:
    - IPAddressPool
    - L2Advertisement

- name: Remove tmp directory used for manifests
  file:
    path: /tmp/k3s
    state: absent
ansible/roles/k3s/post/templates/metallb.crs.j2 (new file)
@@ -0,0 +1,21 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
    - {{ range }}
{% endfor %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
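As the string check above implies, the template accepts `metal_lb_ip_range` in either shape; one or the other of these (placeholder addresses) renders to the same IPAddressPool:

metal_lb_ip_range: 192.168.1.200-192.168.1.220   # legacy single string

metal_lb_ip_range:                               # or a list of ranges/CIDRs
  - 192.168.1.200-192.168.1.220
  - 192.168.2.0/28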
ansible/roles/k3s/prereq/tasks/main.yml (new file)
@@ -0,0 +1,65 @@
---
- name: Set same timezone on every Server
  timezone:
    name: "{{ system_timezone }}"
  when: (system_timezone is defined) and (system_timezone != "Your/Timezone")

- name: Set SELinux to disabled state
  selinux:
    state: disabled
  when: ansible_os_family == "RedHat"

- name: Enable IPv4 forwarding
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: yes

- name: Enable IPv6 forwarding
  sysctl:
    name: net.ipv6.conf.all.forwarding
    value: "1"
    state: present
    reload: yes

- name: Enable IPv6 router advertisements
  sysctl:
    name: net.ipv6.conf.all.accept_ra
    value: "2"
    state: present
    reload: yes

- name: Add br_netfilter to /etc/modules-load.d/
  copy:
    content: "br_netfilter"
    dest: /etc/modules-load.d/br_netfilter.conf
    mode: "u=rw,g=,o="
  when: ansible_os_family == "RedHat"

- name: Load br_netfilter
  modprobe:
    name: br_netfilter
    state: present
  when: ansible_os_family == "RedHat"

- name: Set bridge-nf-call-iptables (just to be sure)
  sysctl:
    name: "{{ item }}"
    value: "1"
    state: present
    reload: yes
  when: ansible_os_family == "RedHat"
  loop:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-ip6tables

- name: Add /usr/local/bin to sudo secure_path
  lineinfile:
    line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
    regexp: "Defaults(\\s)*secure_path(\\s)*="
    state: present
    insertafter: EOF
    path: /etc/sudoers
    validate: 'visudo -cf %s'
  when: ansible_os_family == "RedHat"
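A quick way to confirm the prereqs took effect on a host (manual check, not part of the role):

sysctl net.ipv4.ip_forward net.ipv6.conf.all.forwarding net.bridge.bridge-nf-call-iptables
# each should report 1; `lsmod | grep br_netfilter` confirms the module is loaded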
ansible/roles/update/tasks/main.yml (new file)
@@ -0,0 +1,39 @@
- name: check packages for updates
  shell: yum list updates | awk 'f;/Updated Packages/{f=1;}' | awk '{ print $1 }'
  changed_when: updates.stdout_lines | length > 0
  args:
    warn: false
  register: updates

- name: display count
  debug:
    msg: "Found {{ updates.stdout_lines | length }} packages to be updated:\n\n{{ updates.stdout }}"

- when: updates.stdout_lines | length > 0
  block:
    - name: install updates using yum
      yum:
        name: "*"
        state: latest
    - name: install yum-utils
      package:
        name: yum-utils

- name: check if reboot is required
  shell: needs-restarting -r
  failed_when: false
  register: reboot_required
  changed_when: false

- when: updates.stdout_lines | length > 0 and reboot_required.rc != 0
  block:
    - name: reboot the server if required
      shell: sleep 3; reboot
      ignore_errors: true
      changed_when: false
      async: 1
      poll: 0
    - name: wait for server to come back after reboot
      wait_for_connection:
        timeout: 600
        delay: 20
      register: reboot_result
    - name: reboot time
      debug:
        msg: "The system rebooted in {{ reboot_result.elapsed }} seconds."