initial commit

commit fe686e6579 (2022-10-11 08:13:16 -05:00)
41 changed files with 14059 additions and 0 deletions

16
ansible.cfg Normal file

@@ -0,0 +1,16 @@
[defaults]
inventory = ./group_vars/hosts.ini
roles_path = ./roles
become = True
host_key_checking = False
remote_user = administrator
pipelining = True
nocows = True
remote_tmp = ~/.ansible/tmp
local_tmp = ~/.ansible/tmp
deprecation_warnings = False
callback_whitelist = profile_tasks
[ssh_connection]
ssh_args = -o UserKnownHostsFile=/dev/null -i ansible
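The ssh_args line above expects a private key file named ansible alongside the playbooks (group_vars/ansible.pub holds the matching public key). A minimal sketch of generating such a keypair; the location and bit size are assumptions, not part of this commit:

  # run from the repository root; produces ./ansible and ./ansible.pub
  ssh-keygen -t rsa -b 3072 -f ansible -N "" -C "ansible"
  # the public half is what gets deployed to the nodes' authorized_keys files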

58
group_vars/all.yml Normal file

@@ -0,0 +1,58 @@
---
k3s_version: v1.24.4+k3s1
ansible_user: administrator
systemd_dir: /etc/systemd/system
# Set your timezone
system_timezone: "America/Chicago"
# interface which will be used for flannel
flannel_iface: "eth0"
# apiserver_endpoint is the virtual IP address that will be configured on each master
apiserver_endpoint: "192.168.20.120"
# k3s_token is required so that masters can talk to each other securely
k3s_token: "{{ lookup('env','k3s_token') }}"
# The IP on which the node is reachable in the cluster.
# A sensible default is provided here, but you can still override
# it for each of your hosts.
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
--flannel-iface={{ flannel_iface }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required ones are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
--disable traefik
--kube-controller-manager-arg bind-address=0.0.0.0
--kube-proxy-arg metrics-bind-address=0.0.0.0
--kube-scheduler-arg bind-address=0.0.0.0
--etcd-expose-metrics true
--kubelet-arg containerd=/run/k3s/containerd/containerd.sock
extra_agent_args: >-
{{ extra_args }}
--kubelet-arg node-status-update-frequency=5s
# image tag for kube-vip
kube_vip_tag_version: "v0.5.0"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.5"
metal_lb_controller_tag_version: "v0.13.5"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.20.130-192.168.20.140"
username: "user"
userpassword: '$6$ml9etuD2RAvybIAl$xGbh95q5PIrZQxhXBRR8oHQZcb510vhDxBsdwkBBxSo6IzOfS0WkbYDUgyuu4cvczJes19c.EJjfjO2ROoRsx1'

1
group_vars/ansible.pub Normal file

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDTYqag8OKcV6kIitn3Axlyi3Xr9EeybG10wlglw34fYF0pY+OERy7zZKEju4ijZzQ7eWNlcXLYSorm5Tngkvnz4vbM4b9R7gZjTV9drSGDo0BLkMXNuSTrKwGeokcNkxh+HZcWSK4/SE5zPzvkPj1UvmAgQ4P4N79mqPe5/9gAvdrlUWEtuqVdEHc/FMk4kEZsRu4lg58KoghNCRYMYHOyd1rbHsuWpX5NumPxnosWG22jzqj46rUWEXvA7MrCGGbUDlk5+/h7Bvw4O8nGZLEo/qyaYvChTBj/UqYYBssC4VlW/SNJB1yfrklqdtcknmFVJBi174cQtzZDXOerwneh8/+t7wWpcxkWscxYrwdJspzAU/NGk02xDPaG4F1mdgZ6HIZCQAaw/EbaNbiuU+bhdngEIHUvVmdiy4T09FWIWuJxO6FnAiVIU5K8LpqGLTFp7kjOwAczdQ+KVojm/1A5W/ZoTE/y3Ni1fVaOJFCxSgU7qiKAm7hb2ZXvznNgryc=

14
group_vars/hosts.ini Normal file

@@ -0,0 +1,14 @@
[master]
192.168.20.121
192.168.20.122
192.168.20.123
[node]
192.168.20.124
192.168.20.125
192.168.20.126
[k3s_cluster:children]
master
node


@@ -0,0 +1,6 @@
---
collections:
- name: ansible.utils
- name: community.general
- name: ansible.posix
- name: kubernetes.core
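These collections need to be installed on the control node before the playbooks will run. A minimal sketch, assuming this requirements file is saved as requirements.yml (the filename is truncated from this listing):

  ansible-galaxy collection install -r requirements.yml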

50
main.yml Normal file

@@ -0,0 +1,50 @@
---
- hosts: all
gather_facts: yes
become: yes
roles:
- base
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- k3s/prereq
- k3s/download
- hosts: master
become: yes
roles:
- k3s/master
- hosts: node
become: yes
roles:
- k3s/node
- hosts: master
become: yes
roles:
- role: k3s/post
- hosts: master[0]
become: yes
roles:
- k3s/argocd
- hosts: master[0]
become: no
roles:
- cloudflare
vars:
DNS:
- {record: 'bitwarden', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'nextcloud', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'grafana', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'alertmanager', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'prometheus', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'kong', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'links', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'whoogle', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'kuma', zone: 'durp.info', proxied: 'yes', state: 'present'}
- {record: 'oauth', zone: 'durp.info', proxied: 'yes', state: 'present'}
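Several values are pulled from the controller's environment rather than from the repository: k3s_token (group_vars/all.yml), external_ip and cloudflareapi (the cloudflare role), and kubeseal (the argocd role). A rough sketch of a run with placeholder values; the inventory and SSH key are already wired up in ansible.cfg:

  export k3s_token='shared-cluster-secret'            # joins masters and agents
  export external_ip='203.0.113.10'                   # target of the Cloudflare A records
  export cloudflareapi='cloudflare-api-token'         # Cloudflare API token
  export kubeseal=/path/to/kubeseal-master-key.yaml   # sealed-secrets key copied by the argocd role
  ansible-playbook main.yml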

7
readme.MD Normal file

@@ -0,0 +1,7 @@
Thanks to https://github.com/techno-tim/k3s-ansible

When restoring data:
kubectl patch pv pvc-08d158a8-2450-4f8c-b8a9-89cada22106a -p '{"spec":{"storageClassName":"longhorn"}}'
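The PV name above is specific to one cluster; in general you would first look up the volume bound to the claim being restored. A hedged sketch with a placeholder name:

  kubectl get pv          # find the PV bound to the PVC you are restoring
  kubectl patch pv <pv-name> -p '{"spec":{"storageClassName":"longhorn"}}'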


@@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::AutocleanInterval "7";
APT::Periodic::Unattended-Upgrade "1";


@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDTYqag8OKcV6kIitn3Axlyi3Xr9EeybG10wlglw34fYF0pY+OERy7zZKEju4ijZzQ7eWNlcXLYSorm5Tngkvnz4vbM4b9R7gZjTV9drSGDo0BLkMXNuSTrKwGeokcNkxh+HZcWSK4/SE5zPzvkPj1UvmAgQ4P4N79mqPe5/9gAvdrlUWEtuqVdEHc/FMk4kEZsRu4lg58KoghNCRYMYHOyd1rbHsuWpX5NumPxnosWG22jzqj46rUWEXvA7MrCGGbUDlk5+/h7Bvw4O8nGZLEo/qyaYvChTBj/UqYYBssC4VlW/SNJB1yfrklqdtcknmFVJBi174cQtzZDXOerwneh8/+t7wWpcxkWscxYrwdJspzAU/NGk02xDPaG4F1mdgZ6HIZCQAaw/EbaNbiuU+bhdngEIHUvVmdiy4T09FWIWuJxO6FnAiVIU5K8LpqGLTFp7kjOwAczdQ+KVojm/1A5W/ZoTE/y3Ni1fVaOJFCxSgU7qiKAm7hb2ZXvznNgryc= ansible


@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCiUz4GQntmn/btPuGEnk0YWsbpCFqQTh2fEBmUb1UMONhoCc2lhgMaJwDSv9lLX26YCt636A1w04ANsOZycr3ZFGUoEqMU7DT+0A89IKe8kpNT1oIYz6rQQixzY2oNDsWYJUiovEgipccHrj9ry8Ke3/8BtznvVxtmVi0c3gzQiFY76xxeie5A/sXcC3N2YsD0HC1zFYBbj6LG6w4fBVYyYTZy2MjBu+r42GF0YqpDFXykUnG7yrq2j6Vx2LoVMotYNCXBJb6cfb/hN5gnpZYnD0S1Z1m6IfX7snGtHp5uU0UbOFnjVYUCe6h0XBQa2K4KpFiZac69GT/vjyi5sEUfmMyQXH5vrLOZcWdH+abRUk+B4mFmSYxf8514CuhfHX7y7BkCaN41CQr4dbfNLeaQ1jl9RzKieVJb0VYoHYQHZD8AwKKNXM9/DjLK6TIu1q2bN5pE8cweqDUrJSIFXla9ykEdGMXYWFWtUdURpqy0/QTzD8It/W/tauONuwjPu90=

3
roles/base/files/issue Normal file

@@ -0,0 +1,3 @@
Use of this system is restricted to authorized users only, and all use is subject to an acceptable use policy.
IF YOU ARE NOT AUTHORIZED TO USE THIS SYSTEM, DISCONNECT NOW.

3
roles/base/files/motd Normal file

@@ -0,0 +1,3 @@
THIS SYSTEM IS FOR AUTHORIZED USE ONLY
All activities are logged and monitored.


@@ -0,0 +1,94 @@
# Package generated configuration file
# See the sshd_config(5) manpage for details
# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes
# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 1024
# Logging
SyslogFacility AUTH
LogLevel INFO
# Authentication:
LoginGraceTime 120
PermitRootLogin no
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile %h/.ssh/authorized_keys
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes
# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
X11Forwarding no
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no
#MaxStartups 10:30:60
#Banner /etc/issue.net
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
ClientAliveInterval 300
#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo


@@ -0,0 +1,135 @@
# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
# To modify the system-wide sshd configuration, create a *.conf file under
# /etc/ssh/sshd_config.d/ which will be automatically included below
Include /etc/ssh/sshd_config.d/*.conf
# If you want to change the port on a SELinux system, you have to tell
# SELinux about this change.
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
#
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
PermitRootLogin no
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
PubkeyAuthentication yes
# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile .ssh/authorized_keys
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
PermitEmptyPasswords no
# Change to no to disable s/key passwords
#KbdInteractiveAuthentication yes
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
#KerberosUseKuserok yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
#GSSAPIEnablek5users no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via KbdInteractiveAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and KbdInteractiveAuthentication to 'no'.
# WARNING: 'UsePAM no' is not supported in Fedora and may cause several
# problems.
#UsePAM no
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
#X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
#PrintMotd yes
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
ClientAliveInterval 300
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# override default of no subsystems
Subsystem sftp /usr/libexec/openssh/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server
PasswordAuthentication no
#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo

82
roles/base/tasks/main.yml Normal file

@@ -0,0 +1,82 @@
---
- name: Run Package tasks
include_tasks:
file: ./templates/packages.yml
- name: Create user account
user:
name: "{{ username }}"
password: "{{ userpassword }}"
groups: sudo
shell: /bin/bash
state: present
createhome: yes
when: ansible_os_family == "Debian"
- name: Create user account
user:
name: "{{ username }}"
password: "{{ userpassword }}"
shell: /bin/bash
groups: wheel
state: present
createhome: yes
when: ansible_os_family == "RedHat"
- name: Run SSH tasks
include_tasks:
file: ssh.yml
- name: Copy unattended-upgrades file
copy:
src: files/10periodic
dest: /etc/apt/apt.conf.d/10periodic
owner: root
group: root
mode: "0644"
force: yes
when: ansible_os_family == "Debian"
- name: Remove undesirable packages
package:
name: "{{ unnecessary_software }}"
state: absent
when: ansible_os_family == "Debian"
- name: Stop and disable unnecessary services
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items: "{{ unnecessary_services }}"
ignore_errors: yes
- name: Set a message of the day
copy:
dest: /etc/motd
src: files/motd
owner: root
group: root
mode: 0644
- name: Set a login banner
copy:
dest: "{{ item }}"
src: files/issue
owner: root
group: root
mode: 0644
with_items:
- /etc/issue
- /etc/issue.net
- name: set timezone
shell: timedatectl set-timezone America/Chicago
- name: Enable cockpit
systemd:
name: cockpit
daemon_reload: yes
state: restarted
enabled: yes
when: ansible_os_family == "RedHat"

47
roles/base/tasks/ssh.yml Normal file

@@ -0,0 +1,47 @@
- name: Deploy SSH Key (administrator)
copy:
dest: /home/administrator/.ssh/authorized_keys
src: files/authorized_keys_administrator
force: true
- name: ensure ssh folder exists for user
file:
path: /home/user/.ssh
state: directory
- name: Deploy SSH Key (user)
copy:
dest: /home/user/.ssh/authorized_keys
src: files/authorized_keys_user
force: true
- name: Remove Root SSH Configuration
file:
path: /root/.ssh
state: absent
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "Debian"
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured_redhat
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "RedHat"
- name: Restart SSHD
systemd:
name: sshd
daemon_reload: yes
state: restarted
enabled: yes
ignore_errors: yes

25
roles/base/vars/main.yml Normal file

@@ -0,0 +1,25 @@
---
required_packages:
- ufw
- qemu-guest-agent
- fail2ban
- unattended-upgrades
- cockpit
- nfs-common
- open-iscsi
redhat_required_packages:
- qemu-guest-agent
- cockpit
- iscsi-initiator-utils
unnecessary_services:
- postfix
- telnet
unnecessary_software:
- tcpdump
- nmap-ncat
- wpa_supplicant


@@ -0,0 +1,11 @@
---
- name: Update Cloudflare
community.general.cloudflare_dns:
zone: "{{ item.zone }}"
record: "{{ item.record }}"
state: "{{ item.state }}"
type: A
proxied: "{{ item.proxied }}"
value: "{{ lookup('env','external_ip') }}"
api_token: "{{ lookup('env','cloudflareapi') }}"
with_items: "{{ DNS }}"


@@ -0,0 +1,29 @@
---
- name: copy configs
copy:
src: ./roles/k3s/argocd/templates/
dest: /opt/argocd
owner: administrator
group: administrator
mode: "0664"
force: yes
- name: copy kubeseal master key
copy:
src: "{{ lookup('env','kubeseal') }}"
dest: /opt/kubeseal.yaml
owner: administrator
group: administrator
mode: "0600"
force: yes
- name: Apply Kubeseal master key
command: k3s kubectl apply -f /opt/kubeseal.yaml --force
- name: Apply ArgoCD
command: k3s kubectl apply -f /opt/argocd/argocd.yaml -n argocd
- name: Apply ArgoCD Apps
command: k3s kubectl apply -f /opt/argocd/apps.yaml -n argocd


@@ -0,0 +1,34 @@
apiVersion: v1
kind: Secret
metadata:
name: test-app
namespace: argocd
labels:
argocd.argoproj.io/secret-type: repository
stringData:
url: https://gitlab.com/infrastructure-as-code5/infrastructure.git
password: 3p4MGtXAk3sYwQDDXQrp
username: gitlab+deploy-token-957909
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/infrastructure-as-code5/infrastructure.git
targetRevision: main
path: argocd/argocd
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true

File diff suppressed because it is too large


@@ -0,0 +1,10 @@
---
- name: Download k3s binary x64
get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
when: ansible_facts.architecture == "x86_64"


@@ -0,0 +1,12 @@
---
ansible_user: root
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}


@@ -0,0 +1,197 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
# these will be copied and installed now, then tested later and apply config
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb CRDs to first master
template:
src: "metallb.crds.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Init cluster inside the transient k3s-init service
command:
cmd: "systemd-run -p RestartSec=2 \
-p Restart=on-failure \
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s.service"
args:
warn: false # The ansible systemd module does not support transient units
- name: Verification
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
retries: "{{ retry_count | default(20) }}"
delay: 10
changed_when: false
always:
- name: Save logs of k3s-init.service
include_tasks: fetch_k3s_init_logs.yml
when: log_destination
vars:
log_destination: >-
{{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
- name: Kill the temporary service used for initialization
systemd:
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s.service"
owner: root
group: root
mode: 0644
- name: Enable and check K3s service
systemd:
name: k3s
daemon_reload: yes
state: restarted
enabled: yes
- name: Wait for node-token
wait_for:
path: /var/lib/rancher/k3s/server/node-token
- name: Register node-token file access mode
stat:
path: /var/lib/rancher/k3s/server
register: p
- name: Change file access node-token
file:
path: /var/lib/rancher/k3s/server
mode: "g+rx,o+rx"
- name: Read node-token from master
slurp:
src: /var/lib/rancher/k3s/server/node-token
register: node_token
- name: Store Master node-token
set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access
file:
path: /var/lib/rancher/k3s/server
mode: "{{ p.stat.mode }}"
- name: Create directory .kube
file:
path: ~{{ item }}/.kube
state: directory
owner: "{{ item }}"
mode: "u=rwx,g=rx,o="
loop:
- "{{ ansible_user }}"
- "{{ username }}"
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: ~{{ item }}/.kube/config
remote_src: yes
owner: "{{ item }}"
mode: "u=rw,g=,o="
loop:
- "{{ ansible_user }}"
- "{{ username }}"
- name: Configure kubectl cluster to {{ endpoint_url }}
command: >-
k3s kubectl config set-cluster default
--server={{ endpoint_url }}
--kubeconfig ~{{ item }}/.kube/config
changed_when: true
loop:
- "{{ ansible_user }}"
- "{{ username }}"
vars:
endpoint_url: >-
https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
- name: Create kubectl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl
state: link
- name: Create crictl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl
state: link
- name: Get contents of manifests folder
find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: file
register: k3s_server_manifests
- name: Get sub dirs of manifests folder
find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: directory
register: k3s_server_manifests_directories
- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ k3s_server_manifests.files }}"
- "{{ k3s_server_manifests_directories.files }}"
loop_control:
label: "{{ item.path }}"


@@ -0,0 +1,5 @@
{#
This is a really simple template that just outputs the
value of the "content" variable.
#}
{{ content }}


@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- {{ metal_lb_ip_range }}

File diff suppressed because it is too large


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb


@@ -0,0 +1,32 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system


@@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-vip-ds
namespace: kube-system
spec:
selector:
matchLabels:
name: kube-vip-ds
template:
metadata:
labels:
name: kube-vip-ds
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_interface
value: {{ flannel_iface }}
- name: vip_cidr
value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "15"
- name: vip_renewdeadline
value: "10"
- name: vip_retryperiod
value: "2"
- name: address
value: {{ apiserver_endpoint }}
image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
imagePullPolicy: Always
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_TIME
hostNetwork: true
serviceAccountName: kube-vip
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy: {}
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0


@@ -0,0 +1,16 @@
---
- name: Copy K3s service file
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service"
owner: root
group: root
mode: 0755
- name: Enable and check K3s service
systemd:
name: k3s-node
daemon_reload: yes
state: restarted
enabled: yes


@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,3 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s


@@ -0,0 +1,94 @@
---
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
{% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
--timeout='{{ metal_lb_available_timeout }}'
changed_when: false
run_once: true
with_items:
- description: controller
resource: deployment
name: controller
condition: --for condition=Available=True
- description: webhook service
resource: pod
selector: component=controller
condition: --for=jsonpath='{.status.phase}'=Running
- description: pods in replica sets
resource: pod
selector: component=controller,app=metallb
condition: --for condition=Ready
- description: ready replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.readyReplicas}'=1
- description: fully labeled replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
- description: available replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.availableReplicas}'=1
loop_control:
label: "{{ item.description }}"
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
changed_when: false
run_once: true
until: this.rc == 0
retries: 5
- name: Test metallb-system resources
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
with_items:
- IPAddressPool
- L2Advertisement
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent


@@ -0,0 +1,21 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: first-pool
namespace: metallb-system
spec:
addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
- {{ range }}
{% endfor %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system


@@ -0,0 +1,65 @@
---
- name: Set same timezone on every Server
timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
- name: Enable IPv6 forwarding
sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
- name: Enable IPv6 router advertisements
sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: yes
- name: Add br_netfilter to /etc/modules-load.d/
copy:
content: "br_netfilter"
dest: /etc/modules-load.d/br_netfilter.conf
mode: "u=rw,g=,o="
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
sysctl:
name: "{{ item }}"
value: "1"
state: present
reload: yes
when: ansible_os_family == "RedHat"
loop:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-ip6tables
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
regexp: "Defaults(\\s)*secure_path(\\s)*="
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
when: ansible_os_family == "RedHat"

53
templates/packages.yml Normal file

@@ -0,0 +1,53 @@
---
- name: Update packages
apt:
name: '*'
state: latest
update_cache: yes
only_upgrade: yes
when: ansible_os_family == "Debian"
retries: 3
delay: 10
- name: Remove packages not needed anymore
apt:
autoremove: yes
when: ansible_os_family == "Debian"
retries: 3
delay: 10
- name: Install required packages Debian
apt:
state: latest
pkg: "{{ item }}"
with_items: "{{ required_packages }}"
when: ansible_os_family == "Debian"
retries: 3
delay: 10
- name: Update packages RedHat
yum:
name: '*'
state: latest
update_cache: yes
update_only: yes
when: ansible_os_family == "RedHat"
retries: 3
delay: 10
- name: Remove packages not needed anymore
yum:
autoremove: yes
when: ansible_os_family == "RedHat"
retries: 3
delay: 10
- name: Install required packages RedHat
yum:
state: latest
update_cache: yes
pkg: "{{ item }}"
with_items: "{{ redhat_required_packages }}"
when: ansible_os_family == "RedHat"
retries: 3
delay: 10

146
terraform.tf Normal file

@@ -0,0 +1,146 @@
#------------------------------------------------------
#Defaults
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "~> 2.9.11"
}
}
}
provider "proxmox" {
pm_parallel = 3
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
variable "pm_api_url" {}
variable "pm_api_token_id" {}
variable "pm_api_token_secret" {}
variable "dnsserver" {}
variable "sshkeys" {}
variable "pm_password" {}
variable "pm_user" {}
#k3s
#------------------------------------------------------
variable "k3master" {
type = object({
count = number
name = list(string)
cores = number
memory = number
drive = string
storage = string
template = string
node = string
tag = number
ip = list(number)
})
}
variable "k3server" {
type = object({
count = number
name = list(string)
cores = list(number)
memory = list(number)
drive = list(string)
storage = list(string)
template = string
node = string
tag = number
ip = list(number)
})
}
resource "proxmox_vm_qemu" "k3master" {
count = var.k3master.count
ciuser = "administrator"
vmid = "${var.k3master.tag}${var.k3master.ip[count.index]}"
name = var.k3master.name[count.index]
target_node = var.k3master.node
clone = var.k3master.template
full_clone = true
os_type = "cloud-init"
agent = 1
cores = var.k3master.cores
sockets = 1
cpu = "host"
memory = var.k3master.memory
scsihw = "virtio-scsi-pci"
bootdisk = "scsi0"
boot = "c"
onboot = true
disk {
size = var.k3master.drive
type = "scsi"
storage = var.k3master.storage
ssd = 0
backup = 0
}
network {
model = "virtio"
bridge = "vmbr1"
tag = var.k3master.tag
}
lifecycle {
ignore_changes = [
network,
]
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${var.k3master.tag}.${var.k3master.ip[count.index]}/24,gw=192.168.${var.k3master.tag}.1"
searchdomain = "durp.loc"
nameserver = "${var.dnsserver}"
sshkeys = "${var.sshkeys}"
}
resource "proxmox_vm_qemu" "k3server" {
count = var.k3server.count
ciuser = "administrator"
vmid = "${var.k3server.tag}${var.k3server.ip[count.index]}"
name = var.k3server.name[count.index]
target_node = var.k3server.node
clone = var.k3server.template
full_clone = true
os_type = "cloud-init"
agent = 1
cores = var.k3server.cores[count.index]
sockets = 1
cpu = "host"
memory = var.k3server.memory[count.index]
scsihw = "virtio-scsi-pci"
bootdisk = "scsi0"
boot = "c"
onboot = true
disk {
size = var.k3server.drive[count.index]
type = "scsi"
storage = var.k3server.storage[count.index]
ssd = 1
backup = 0
}
network {
model = "virtio"
bridge = "vmbr1"
tag = var.k3server.tag
}
lifecycle {
ignore_changes = [
network,
]
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${var.k3server.tag}.${var.k3server.ip[count.index]}/24,gw=192.168.${var.k3server.tag}.1"
searchdomain = "durp.loc"
nameserver = "${var.dnsserver}"
sshkeys = "${var.sshkeys}"
}

28
terraform.tfvars Normal file

@@ -0,0 +1,28 @@
dnsserver = "192.168.20.1"
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDTYqag8OKcV6kIitn3Axlyi3Xr9EeybG10wlglw34fYF0pY+OERy7zZKEju4ijZzQ7eWNlcXLYSorm5Tngkvnz4vbM4b9R7gZjTV9drSGDo0BLkMXNuSTrKwGeokcNkxh+HZcWSK4/SE5zPzvkPj1UvmAgQ4P4N79mqPe5/9gAvdrlUWEtuqVdEHc/FMk4kEZsRu4lg58KoghNCRYMYHOyd1rbHsuWpX5NumPxnosWG22jzqj46rUWEXvA7MrCGGbUDlk5+/h7Bvw4O8nGZLEo/qyaYvChTBj/UqYYBssC4VlW/SNJB1yfrklqdtcknmFVJBi174cQtzZDXOerwneh8/+t7wWpcxkWscxYrwdJspzAU/NGk02xDPaG4F1mdgZ6HIZCQAaw/EbaNbiuU+bhdngEIHUvVmdiy4T09FWIWuJxO6FnAiVIU5K8LpqGLTFp7kjOwAczdQ+KVojm/1A5W/ZoTE/y3Ni1fVaOJFCxSgU7qiKAm7hb2ZXvznNgryc="
k3master = {
count = "3"
name = ["master01", "master02", "master03"]
cores = "2"
memory = "4096"
drive = "20G"
storage = "domains"
template = "CentOS9-Template"
node = "overlord"
tag = "20"
ip = ["121", "122", "123"]
}
k3server = {
count = "3"
name = ["node01", "node02", "node03"]
cores = ["4", "4", "4"]
memory = ["8192","8192","8192"]
drive = ["145G","145G","145G"]
storage = ["NVMeSSD", "NVMeSSD", "NVMeSSD"]
template = "CentOS9-Template"
node = "overlord"
tag = "20"
ip = ["124", "125", "126"]
}
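terraform.tf declares pm_api_url, pm_user, pm_password, pm_api_token_id and pm_api_token_secret without defaults, and none of them are set here, so they have to be supplied at run time (for example through TF_VAR_* environment variables or CI, given the http backend). A sketch of a local run under that assumption; all values are placeholders:

  export TF_VAR_pm_api_url='https://proxmox.durp.loc:8006/api2/json'
  export TF_VAR_pm_user='terraform@pve'
  export TF_VAR_pm_password='change-me'
  export TF_VAR_pm_api_token_id='unused'       # declared in terraform.tf, so Terraform will still ask for it
  export TF_VAR_pm_api_token_secret='unused'
  terraform init    # pass -backend-config=... here for the http backend
  terraform plan
  terraform apply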

32
vm_templates/cloudinit.sh Normal file

@@ -0,0 +1,32 @@
export URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20220425.0.x86_64.qcow2"
export NAME="centos9.qcow2"
export VM="CentOS9-Template"
export VMID="99999"
echo Downloading Image
wget $URL -O $NAME
echo adding qemu agent
virt-customize -a $NAME --install qemu-guest-agent
echo setting up VM
qm create $VMID --name $VM --memory 14336 --cores 6 --cpu host --net0 virtio,bridge=vmbr0
qm importdisk $VMID $NAME domains
qm set $VMID --scsihw virtio-scsi-pci --scsi0 domains:vm-$VMID-disk-0,ssd=0
qm set $VMID --boot c --bootdisk scsi0
qm set $VMID --ide2 domains:cloudinit
qm set $VMID --serial0 socket --vga serial0
qm set $VMID --agent enabled=1
qm set $VMID --nameserver 192.168.20.1
qm set $VMID --searchdomain durp.loc
qm set $VMID --ciuser administrator
echo Converting to Template
qm template $VMID
echo Deleting image
rm $NAME