31 Commits

SHA1 Message Date
c5e1681c5e update 2024-08-07 05:48:59 -05:00
3caef94aa2 update 2024-08-07 05:48:43 -05:00
d1b5b53626 update 2024-08-07 05:47:57 -05:00
1fb05b911d update 2024-08-07 05:46:58 -05:00
35e46caf8e update 2024-08-07 05:35:50 -05:00
1488f5c3bd update 2024-08-07 05:35:03 -05:00
6d743b8812 update 2024-08-07 05:34:15 -05:00
23a5ebef45 update 2024-08-07 05:33:45 -05:00
c904e0260a update 2024-08-07 05:33:23 -05:00
b4adf6bfe1 update 2024-08-07 05:28:09 -05:00
51c5eed833 update 2024-08-07 05:26:28 -05:00
f308a5c672 update 2024-08-06 05:11:29 -05:00
305a418382 update 2024-08-06 04:55:02 -05:00
86d3fe8b38 update 2024-08-05 05:04:11 -05:00
3df7190f90 update 2024-08-05 04:59:37 -05:00
27dd5ed7b7 update 2024-08-05 04:58:12 -05:00
d5d746743c test secret in same namespace 2024-08-05 04:52:21 -05:00
b794d2945f update 2024-08-05 04:51:46 -05:00
26b4774589 update 2024-08-04 10:53:35 -05:00
16ff689f49 update 2024-08-04 10:47:47 -05:00
45bf00db84 update 2024-08-04 10:40:35 -05:00
0045af169d update 2024-08-04 10:39:19 -05:00
3fe64ede97 update 2024-08-04 10:36:59 -05:00
e4afc699b3 update 2024-08-04 10:35:13 -05:00
aacf1e8656 update 2024-08-04 10:34:29 -05:00
3c3bd1bfa0 update 2024-08-04 10:00:54 -05:00
654f2b4d85 update 2024-08-04 09:53:37 -05:00
1c04237918 update 2024-08-04 09:48:25 -05:00
a8a881dd7c update 2024-08-04 09:44:38 -05:00
43579b8f6e update 2024-08-04 09:34:19 -05:00
e0eaa1a96c update 2024-08-04 09:33:10 -05:00
234 changed files with 522 additions and 7784 deletions

.gitignore vendored
View File

@@ -1,3 +1 @@
.idea
infra/terraform/.terraform
infra/terraform/.terraform.lock.hcl

View File

@@ -1,3 +0,0 @@
include:
- local: infra/.gitlab/.gitlab-ci.yml
- local: dmz/.gitlab/.gitlab-ci.yml

View File

@@ -1,4 +0,0 @@
VAULT_HELM_SECRET_NAME=$(kubectl get secrets -n vault --output=json | jq -r '.items[].metadata | select(.name|startswith("vault-token-")).name')
TOKEN_REVIEW_JWT=$(kubectl get secret $VAULT_HELM_SECRET_NAME -n vault --output='go-template={{ .data.token }}' | base64 --decode)
KUBE_CA_CERT=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.certificate-authority-data}' | base64 --decode)
KUBE_HOST=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.server}')
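Note: the last three variables above are the inputs HashiCorp Vault's Kubernetes auth method expects. A hedged sketch of how they would typically be consumed, assuming an authenticated vault CLI and an auth mount named "kubernetes" (the mount name is an assumption, not shown in this diff):

# Sketch only: configure Vault's Kubernetes auth method with the values gathered above.
vault auth enable kubernetes
vault write auth/kubernetes/config \
  token_reviewer_jwt="$TOKEN_REVIEW_JWT" \
  kubernetes_host="$KUBE_HOST" \
  kubernetes_ca_cert="$KUBE_CA_CERT"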

View File

@@ -1,5 +0,0 @@
- hosts: all
gather_facts: yes
become: yes
roles:
- base

View File

@@ -1,4 +0,0 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::AutocleanInterval "7";
APT::Periodic::Unattended-Upgrade "1";

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGhPVgL8gXdRTw0E2FvlOUoUI4vd794nB0nZVIsc+U5M

View File

@@ -1,4 +0,0 @@
Use of this system is restricted to authorized users only, and all use is subject to an acceptable use policy.
IF YOU ARE NOT AUTHORIZED TO USE THIS SYSTEM, DISCONNECT NOW.

View File

@@ -1,4 +0,0 @@
THIS SYSTEM IS FOR AUTHORIZED USE ONLY
All activities are logged and monitored.

View File

@@ -1,95 +0,0 @@
# Package generated configuration file
# See the sshd_config(5) manpage for details
# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes
# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 1024
# Logging
SyslogFacility AUTH
LogLevel INFO
# Authentication:
LoginGraceTime 120
PermitRootLogin no
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile %h/.ssh/authorized_keys
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes
# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
X11Forwarding no
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no
#MaxStartups 10:30:60
#Banner /etc/issue.net
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
ClientAliveInterval 300
#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo
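Note: not part of the original files, but a common companion to a hardened sshd_config like the one above is a syntax check before restarting, so a bad config cannot cut off SSH access. A minimal sketch:

# Sketch: validate the rendered config, then restart only if it parses cleanly.
sshd -t -f /etc/ssh/sshd_config && systemctl restart sshd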

View File

@@ -1,143 +0,0 @@
- name: Update packages
apt:
name: '*'
state: latest
update_cache: yes
only_upgrade: yes
retries: 300
delay: 10
- name: Remove packages not needed anymore
apt:
autoremove: yes
retries: 300
delay: 10
- name: Install required packages Debian
apt:
state: latest
pkg: "{{ item }}"
with_items: "{{ required_packages }}"
retries: 300
delay: 10
- name: Create user account
user:
name: "user"
shell: /bin/bash
state: present
createhome: yes
- name: ensure ssh folder exists for user
file:
path: /home/user/.ssh
owner: user
group: user
mode: "0700"
state: directory
- name: Deploy SSH Key (user)
copy:
dest: /home/user/.ssh/authorized_keys
src: files/authorized_keys_user
owner: user
group: user
force: true
- name: Remove Root SSH Configuration
file:
path: /root/.ssh
state: absent
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "Debian"
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured_redhat
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "RedHat"
- name: Restart SSHD
systemd:
name: sshd
daemon_reload: yes
state: restarted
enabled: yes
ignore_errors: yes
- name: Copy unattended-upgrades file
copy:
src: files/10periodic
dest: /etc/apt/apt.conf.d/10periodic
owner: root
group: root
mode: "0644"
force: yes
when: ansible_os_family == "Debian"
- name: Remove undesirable packages
package:
name: "{{ unnecessary_software }}"
state: absent
when: ansible_os_family == "Debian"
- name: Stop and disable unnecessary services
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items: "{{ unnecessary_services }}"
ignore_errors: yes
- name: Set a message of the day
copy:
dest: /etc/motd
src: files/motd
owner: root
group: root
mode: 0644
- name: Set a login banner
copy:
dest: "{{ item }}"
src: files/issue
owner: root
group: root
mode: 0644
with_items:
- /etc/issue
- /etc/issue.net
- name: set timezone
shell: timedatectl set-timezone America/Chicago
- name: Enable cockpit
systemd:
name: cockpit
daemon_reload: yes
state: restarted
enabled: yes
- name: change password
ansible.builtin.user:
name: "user"
state: present
password: "{{ lookup('ansible.builtin.env', 'USER_PASSWORD') | password_hash('sha512') }}"
- name: add user to sudoers
community.general.sudoers:
name: user
state: present
user: user
commands: ALL
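Note: the change-password task reads USER_PASSWORD from the controller's environment, so the play shown earlier (the one applying the base role) has to be invoked with that variable set. A hedged sketch of a typical invocation; the inventory name hosts.ini and playbook name site.yml are assumptions, not files shown in this diff:

# Sketch: run the play with the password supplied from the environment.
USER_PASSWORD='example-password' ansible-playbook -i hosts.ini site.yml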

View File

@@ -1,17 +0,0 @@
required_packages:
- ufw
- qemu-guest-agent
- fail2ban
- unattended-upgrades
- cockpit
- nfs-common
- open-iscsi
unnecessary_services:
- postfix
- telnet
unnecessary_software:
- tcpdump
- nmap-ncat
- wpa_supplicant

View File

@@ -9,6 +9,6 @@ appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 6.11.1
version: 6.7.11
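Note: this hunk (and the similar Chart.yaml hunks below) only edits the dependency pin; the chart's lock file and packaged dependencies are normally refreshed afterwards. A sketch, assuming the command is run from the chart's directory:

# Sketch: re-resolve the pinned dependency and refresh Chart.lock / charts/
helm dependency update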

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/internalproxy
targetRevision: dmz
path: internalproxy
directory:
recurse: true
destination:

View File

@@ -1,16 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: durpot
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpot
targetRevision: dmz
path: argocd
destination:
namespace: durpot
namespace: argocd
name: in-cluster
syncPolicy:
automated:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/cert-manager
targetRevision: dmz
path: cert-manager
destination:
namespace: cert-manager
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-dns
targetRevision: dmz
path: external-dns
destination:
namespace: external-dns
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-secrets
targetRevision: dmz
path: external-secrets
destination:
namespace: external-secrets
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/gatekeeper
targetRevision: dmz
path: gatekeeper
destination:
namespace: gatekeeper
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/gitlab-runner
targetRevision: dmz
path: gitlab-runner
destination:
namespace: gitlab-runner
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kube-prometheus-stack
targetRevision: dmz
path: kube-prometheus-stack
destination:
namespace: kube-prometheus-stack
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kubeclarity
targetRevision: dmz
path: kubeclarity
destination:
namespace: kubeclarity
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/longhorn
targetRevision: dmz
path: longhorn
destination:
namespace: longhorn-system
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/metallb-system
targetRevision: dmz
path: metallb-system
destination:
namespace: metallb-system
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/traefik
targetRevision: dmz
path: traefik
destination:
namespace: traefik
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/uptimekuma
targetRevision: dmz
path: uptimekuma
directory:
recurse: true
destination:

View File

@@ -1,16 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: authentik
name: vault
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/authentik
targetRevision: dmz
path: vault
destination:
namespace: authentik
namespace: vault
name: in-cluster
syncPolicy:
automated:
@@ -18,4 +18,8 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.15.3
version: 1.*.*

View File

@@ -1,95 +0,0 @@
stages:
- plan
- apply
- destroy
variables:
WORKDIR: $CI_PROJECT_DIR/dmz/terraform
GITLAB_TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/dmz
image:
name: registry.durp.info/opentofu/opentofu:latest
entrypoint: [""]
.tf-init:
before_script:
- cd $WORKDIR
- tofu init
-reconfigure
-backend-config="address=${GITLAB_TF_ADDRESS}"
-backend-config="lock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="unlock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="username=gitlab-ci-token"
-backend-config="password=${CI_JOB_TOKEN}"
-backend-config="lock_method=POST"
-backend-config="unlock_method=DELETE"
-backend-config="retry_wait_min=5"
format:
stage: .pre
allow_failure: false
script:
- cd $WORKDIR
- tofu fmt -diff -check -write=false
rules:
- changes:
- "dmz/terraform/*.tf"
validate:
stage: .pre
allow_failure: false
extends: .tf-init
script:
- tofu validate
rules:
- changes:
- "dmz/terraform/*.tf"
plan-dmz-infrastructure:
stage: plan
variables:
PLAN: plan.tfplan
JSON_PLAN_FILE: tfplan.json
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- apk add --update curl jq
- alias convert_report="jq -r '([.resource_changes[].change.actions?]|flatten)|{\"create\":(map(select(.==\"create\"))|length),\"update\":(map(select(.==\"update\"))|length),\"delete\":(map(select(.==\"delete\"))|length)}'"
- tofu plan -out=$PLAN $ARGUMENTS
- tofu show --json $PLAN | jq -r '([.resource_changes[].change.actions?]|flatten)|{"create":(map(select(.=="create"))|length),"update":(map(select(.=="update"))|length),"delete":(map(select(.=="delete"))|length)}' > $JSON_PLAN_FILE
artifacts:
reports:
terraform: $WORKDIR/$JSON_PLAN_FILE
needs: ["validate","format"]
rules:
- changes:
- "dmz/terraform/*.tf"
apply-dmz-infrastructure:
stage: apply
variables:
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- tofu apply -auto-approve $ARGUMENTS
rules:
- changes:
- "dmz/terraform/*.tf"
when: manual
needs: ["plan-dmz-infrastructure"]
destroy-dmz-infrastructure:
stage: destroy
variables:
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- tofu destroy -auto-approve $ARGUMENTS
rules:
- changes:
- "dmz/terraform/*.tf"
when: manual
needs: ["plan-dmz-infrastructure"]

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: cert-manager
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.16.3

View File

@@ -1,6 +0,0 @@
#apiVersion: v1
#kind: ServiceAccount
#metadata:
# name: issuer
#secrets:
# - name: issuer-token-lmzpj

File diff suppressed because one or more lines are too long

View File

@@ -1,22 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: cloudflare-api-token-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: cloudflare-api-token-secret
data:
- secretKey: cloudflare-api-token-secret
remoteRef:
key: kv/cert-manager
property: cloudflare-api-token-secret
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,26 +0,0 @@
cert-manager:
crds:
enabled: true
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-controller
pullPolicy: Always
replicaCount: 3
extraArgs:
- --dns01-recursive-nameservers=1.1.1.1:53,1.0.0.1:53
- --dns01-recursive-nameservers-only
podDnsPolicy: None
podDnsConfig:
nameservers:
- "1.1.1.1"
- "1.0.0.1"
webhook:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-webhook
pullPolicy: Always
cainjector:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-cainjector
pullPolicy: Always

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: external-secrets
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.13.0

View File

@@ -1,81 +0,0 @@
apiVersion: v1
data:
vault.pem: |
-----BEGIN CERTIFICATE-----
MIIEszCCA5ugAwIBAgIUZEzzxqEuYiKHkL1df+Cb22NRRJMwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzIyMzQ0MloXDTM1MDEy
MTExMTU1NVowIDEeMBwGA1UEAxMVdmF1bHQuaW5mcmEuZHVycC5pbmZvMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkZM0ue4bMcmmATs+kGYSpR2hLUzq
scGIwCtqmaKCMbd1xhmgjnIR3zvSRptLR2GVGvc1ti6qby0jXYvcqbxkHvay00zW
2zYN+M2m4lXpuWzg1t6NEoO6XGAsGj2v0vcVktPPU9uj0rGUVGWWfsvjoXqQFg5I
jdxsxK9SvMvw2XtE3FgKxpzCyw94InIHlcPwFTO+3ZdKStZlMbUDIkmszLBrWFcr
XOsPDfLxqMy0Ck//LKIt8djh3254FHB1GG5+kI+JSW1o+tUcL2NymvIINwm/2acS
1uTm+j9W7iEXav0pJNmm+/dzSskc3Y0ftM0h2HCXgitBIaEZnUVneNHOLwIDAQAB
o4IB7zCCAeswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFCaQ2q7j7LyBGETEZ5qaJAdlISKCMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2
dfBMWVYeWrQ2MIH0BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6
Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFo
dHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3Nw
MDEGCCsGAQUFBzAChiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtp
L2NhMDsGCCsGAQUFBzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVy
cC5pbmZvL3YxL3BraS9jYTAgBgNVHREEGTAXghV2YXVsdC5pbmZyYS5kdXJwLmlu
Zm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAx
L3YxL3BraS9jcmwwNqA0oDKGMGh0dHBzOi8vcm9vdC12YXVsdC5pbnRlcm5hbC5k
dXJwLmluZm8vdjEvcGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAuJ+lplY/+A5L
5LzkljbKDTy3U6PLv1LtxqVCOFGiJXBnXMjtVW07bBEUadzFRNW8GHQ3w5QzOG6k
/vE/TrrJho7l05J/uc+BUrPSNjefLmQV6hn4jrP86PR0vzRfbSqKKBIID9M7+zi6
GFvHlVkSHsQyMQp7JOoax9KVzW2Y+OIgw7Lgw2tP122WCt2SIF0QenoZHsoW0guj
tzTJRmJDjn6XeJ7L3FPkf37H6ub0Jg3zBGr6eorEFfYZNN5CXezjqMFBpRdq4UIo
1M3A7o3uyZFcFsp/vGDcMBkwaCsBV9idu/HwkvGaTUNI285ilBORPD0bMZnACq/9
+Q/cdsO5lg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEmzCCA4OgAwIBAgIUQwCAs82sgSuiaVbjANHScO2DSfAwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMjEyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAn9fjGRqqFsqguz56X6cXZwEMtD9wElwSFCb4Fc8YTzlH
4fV13QwXKESLE/Q+7bw4y4FJQ8BiGNbxxbQOOgWhfGGlQyFa1lfhJtYLfqRN5C2/
S7nr0YxDB9duc4OAExVL6Pr4/Koc+vDZY03l7RzwnF2AOM9DjFTASw01TphCQjRk
U+upiN2TUhUPejV/gMR+zXM6pn98UBKG1dNubS0HzAMwAEXAPm141NDyWUCPT9+3
6P03Ka8mUTx3X49OCtvJEGEQbtlnTFQaOSkP1yLW+XRMHw3sQaV2PWXu5fInbEpZ
+SuzmgLOXtmQNmHLav9q1qeTVkpBGPWvfh2Vh1JJhQIDAQABo4IB4zCCAd8wDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJaP17f1Zw0V
55Ks9Uf0USVWl0BPMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2dfBMWVYeWrQ2MIH0
BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6Ly8xOTIuMTY4LjIw
LjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFodHRwczovL3Jvb3Qt
dmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3NwMDEGCCsGAQUFBzAC
hiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtpL2NhMDsGCCsGAQUF
BzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3Br
aS9jYTAUBgNVHREEDTALgglkdXJwLmluZm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0
cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9jcmwwNqA0oDKGMGh0dHBz
Oi8vcm9vdC12YXVsdC5pbnRlcm5hbC5kdXJwLmluZm8vdjEvcGtpL2NybDANBgkq
hkiG9w0BAQsFAAOCAQEAiqAZ4zNIEkCWcvpDRq0VyJuk59sVtJr5X4FscHQ179nE
QbbvMe+EBDFS6XQml1Elj8jiPa/D5O9Oc6Iisnm5+weZKwApz/lQ+XVkWLCoEplB
ZZ9fcWVCbMLt0xlt8qn5z/mYKfbCT7ZCqDO+prQZt+ADJcQbiknfroAAqEbNKxwN
Y9uUyOWNF3SxJEch4w2dtX+IEVmxeZnhMy8OuP0SQKl8aW40ugiG0ZD5yTBBfOD9
zsrGSU/iSatn0b7bevBhaL96hz1/rNR1cL+4/albX2hrr8Rv3/SB2DLtNQlQW0ls
AfhXAqP5zL+Ytgf1Of/pVdgnhxrYUY7RKCSGY5Hagw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDLzCCAhegAwIBAgIUNHdvOzam2HPVdwXpMHUy4wl8ZRYwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMTUyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA8XDTVEtRI3+k4yuvqVqfIiLRQJcXbmhfVtAeYk+5j9Ox
p1w9YHdnPLqLFrD1PzadjqYeAp/fwlEFfs6lqwoTS8S9vhaFqcgB57nVMb77dTBb
/08XHXOU6FPRjdFKm5QMpS7tn1XacPMy/o0bKqRREQeiuFDGVRyuF5PUgvWc1dvJ
l27JvvgYktgjfpNS4DlCxg4lGXT5abvaKf2hnr65egaIo/yRWN9wnvAzRiY7oci7
GA1oKz87Yc1tfL2gcynrwccOOCF/eUKesJR1I6GXNkN/a1fcr+Ld9Z9NhHBtO+vE
N8DsZY+kG7DE3M4BCCTFUzllcYHjaW4HaF9vZW+PYwIDAQABo3kwdzAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7WMLIaSkU75CJHZ1
8ExZVh5atDYwHwYDVR0jBBgwFoAU7WMLIaSkU75CJHZ18ExZVh5atDYwFAYDVR0R
BA0wC4IJZHVycC5pbmZvMA0GCSqGSIb3DQEBCwUAA4IBAQAS/qUI/1Yv07xUTK5k
r93kC7GSPpmpkXIsfjChAl93sebN143fu70NUP74jjCc0Wkb8hRofGg10E+/24r1
AI0KsLhzKzfIASxUVQAn8RTptLruaaPLboSA4MUZ8IB5y8Vy8E3/KtD0gD80j64Y
rm9XGHA0HTJHbPUTb/Rux2g0E7WtiyWSWH8mqzbegU8IrkM3eVT4+ylBE7YkfWDD
dw44sB71tfmDKpzWg6XQ6YMh0YfnyG1fYCj9LhuecNY9Uuo6cjDaAvkzMewWwqDx
Q2Ekas98Di6itCP8vET+gBDjeCc+XR6Hx6vzWmxlZhwDuxEKL1a2/DabUxJyMNzv
55Fn
-----END CERTIFICATE-----
kind: ConfigMap
metadata:
name: ca-pemstore

View File

@@ -1,94 +0,0 @@
external-secrets:
replicaCount: 3
revisionHistoryLimit: 1
leaderElect: true
installCRDs: true
crds:
createClusterExternalSecret: true
createClusterSecretStore: true
createClusterGenerator: true
createPushSecret: true
conversion:
enabled: false
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
webhook:
create: false
failurePolicy: Ignore
log:
level: debug
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
certController:
create: false
revisionHistoryLimit: 1
log:
level: debug
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
tag: ""
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true

View File

@@ -1,101 +0,0 @@
#apiVersion: external-secrets.io/v1beta1
#kind: ExternalSecret
#metadata:
# name: ollama-secret
#spec:
# secretStoreRef:
# name: vault
# kind: ClusterSecretStore
# target:
# name: ollama-secret
# data:
# - secretKey: users
# remoteRef:
# key: secrets/internalproxy/ollama
# property: users
#
#---
#
#apiVersion: traefik.containo.us/v1alpha1
#kind: Middleware
#metadata:
# name: ollama-basic-auth
#spec:
# basicAuth:
# secret: ollama-secret
#
#---
#
#apiVersion: v1
#kind: Service
#metadata:
# name: ollama
#spec:
# ports:
# - name: app
# port: 11435
# protocol: TCP
# targetPort: 11435
# clusterIP: None
# type: ClusterIP
#
#---
#
#apiVersion: v1
#kind: Endpoints
#metadata:
# name: ollama
#subsets:
# - addresses:
# - ip: 192.168.20.104
# ports:
# - name: app
# port: 11435
# protocol: TCP
#
#---
#
#apiVersion: traefik.containo.us/v1alpha1
#kind: IngressRoute
#metadata:
# name: ollama-ingress
#spec:
# entryPoints:
# - websecure
# routes:
# - match: Host(`ollama.durp.info`) && PathPrefix(`/`)
# middlewares:
# - name: ollama-basic-auth
# kind: Rule
# services:
# - name: ollama
# port: 11435
# tls:
# secretName: ollama-tls
#
#---
#
#apiVersion: cert-manager.io/v1
#kind: Certificate
#metadata:
# name: ollama-tls
#spec:
# secretName: ollama-tls
# issuerRef:
# name: letsencrypt-production
# kind: ClusterIssuer
# commonName: "ollama.durp.info"
# dnsNames:
# - "ollama.durp.info"
#
#---
#
#kind: Service
#apiVersion: v1
#metadata:
# name: ollama-external-dns
# annotations:
# external-dns.alpha.kubernetes.io/hostname: ollama.durp.info
#spec:
# type: ExternalName
# externalName: durp.info

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: metallb-system
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: metallb
repository: https://metallb.github.io/metallb
version: 0.14.9

View File

@@ -1,115 +0,0 @@
resource "proxmox_vm_qemu" "k3smaster" {
count = local.k3smaster.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3smaster.ip[count.index]}"
name = local.k3smaster.name[count.index]
target_node = local.k3smaster.node[count.index]
clone = local.template
tags = local.k3smaster.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3smaster.cores
sockets = 1
cpu_type = "host"
memory = local.k3smaster.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3smaster.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3smaster.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "k3sserver" {
count = local.k3sserver.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3sserver.ip[count.index]}"
name = local.k3sserver.name[count.index]
target_node = local.k3sserver.node[count.index]
clone = local.template
tags = local.k3sserver.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3sserver.cores
sockets = 1
cpu_type = "host"
memory = local.k3sserver.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3sserver.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3sserver.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View File

@@ -1,48 +0,0 @@
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc6"
}
}
}
provider "proxmox" {
pm_parallel = 1
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
locals {
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEphzWgwUZnvL6E5luKLt3WO0HK7Kh63arSMoNl5gmjzXyhG1DDW0OKfoIl0T+JZw/ZjQ7iii6tmSLFRk6nuYCldqe5GVcFxvTzX4/xGEioAyG0IiUGKy6s+9xzO8QXF0EtSNPH0nfHNKcCjgwWAzM+Lt6gW0Vqs+aU5ICuDiEchmvYPz+rBaVldJVTG7m3ogKJ2aIF7HU/pCPp5l0E9gMOw7s0ABijuc3KXLEWCYgL39jIST6pFH9ceRLmu8Xy5zXHAkkEEauY/e6ld0hlzLadiUD7zYJMdDcm0oRvenYcUlaUl9gS0569IpfsJsjCejuqOxCKzTHPJDOT0f9TbIqPXkGq3s9oEJGpQW+Z8g41BqRpjBCdBk+yv39bzKxlwlumDwqgx1WP8xxKavAWYNqNRG7sBhoWwtxYEOhKXoLNjBaeDRnO5OY5AQJvONWpuByyz0R/gTh4bOFVD+Y8WWlKbT4zfhnN70XvapRsbZiaGhJBPwByAMGg6XxSbC6xtbyligVGCEjCXbTLkeKq1w0DuItY+FBGO3J2k90OiciTVSeyiVz9J/Y03UB0gHdsMCoVNrj+9QWfrTLDhM7D5YrXUt5nj2LQTcbtf49zoQXWxUhozlg42E/FJU/Yla7y55qWizAEVyP2/Ks/PHrF679k59HNd2IJ/aicA9QnmWtLQ== ansible"
template = "Debian12-Template"
storage = "cache-domains"
emulatessd = true
format = "raw"
dnsserver = "192.168.98.1"
vlan = 98
k3smaster = {
tags = "k3s_dmz"
count = 3
name = ["master01-dmz", "master02-dmz", "master03-dmz"]
cores = 2
memory = "4096"
drive = 20
node = ["mothership", "overlord", "vanguard"]
ip = ["11", "12", "13"]
}
k3sserver = {
tags = "k3s_dmz"
count = 3
name = ["node01-dmz", "node02-dmz", "node03-dmz"]
cores = 4
memory = "8192"
drive = 240
node = ["mothership", "overlord", "vanguard"]
ip = ["21", "22", "23"]
}
}

View File

@@ -1,14 +0,0 @@
variable "pm_api_url" {
description = "API URL to Proxmox provider"
type = string
}
variable "pm_password" {
description = "Passowrd to Proxmox provider"
type = string
}
variable "pm_user" {
description = "UIsername to Proxmox provider"
type = string
}
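Note: these variables carry no defaults, so the plan/apply jobs above are expected to supply them. A sketch using OpenTofu's standard TF_VAR_ environment convention; the values are placeholders, and in this repo they would normally come from CI/CD variables rather than a shell:

# Sketch: provide the Proxmox credentials via environment variables.
export TF_VAR_pm_api_url="https://proxmox.example.com:8006/api2/json"
export TF_VAR_pm_user="terraform-user@pve"
export TF_VAR_pm_password="<secret>"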

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: traefik-configmap
data:
config.yml: |
http:
routers:
router0:
service: service0
rule: Host(`testing.durp.info`)
services:
service0:
loadBalancer:
servers:
- url: https://192.168.20.130

View File

@@ -1,113 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: traefik-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`traefik.durp.info`)
kind: Rule
services:
- name: api@internal
kind: TraefikService
tls:
secretName: traefik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: traefik-tls
namespace: traefik
spec:
secretName: traefik-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "traefik.durp.info"
dnsNames:
- "traefik.durp.info"
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: infra-cluster
port: 443
tls:
secretName: authentik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: authentik-tls
spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: authentik-tls
commonName: "authentik.durp.info"
dnsNames:
- "authentik.durp.info"
---
apiVersion: v1
kind: Endpoints
metadata:
name: master-cluster
subsets:
- addresses:
- ip: 192.168.20.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: master-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
name: infra-cluster
subsets:
- addresses:
- ip: 192.168.12.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: infra-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443

View File

@@ -1,59 +0,0 @@
traefik:
image:
registry: registry.durp.info
repository: traefik
pullPolicy: Always
providers:
kubernetesCRD:
allowCrossNamespace: false
allowExternalNameServices: true
allowEmptyServices: false
deployment:
replicas: 3
revisionHistoryLimit: 1
volumes:
- name: traefik-configmap
mountPath: "/config"
type: configMap
ingressRoute:
dashboard:
enabled: true
additionalArguments:
- "--providers.file.filename=/config/config.yml"
- "--serversTransport.insecureSkipVerify=true"
- "--log.level=DEBUG"
- --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin
- --experimental.plugins.jwt.version=v0.7.0
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Pods
value: 1
periodSeconds: 60
# -- [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for `traefik` container.
resources:
requests:
cpu: "100m"
memory: "512Mi"
limits:
memory: "512Mi"

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: vault
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: vault
repository: https://helm.releases.hashicorp.com
version: 0.29.1

View File

@@ -1,23 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vault
spec:
provider:
vault:
server: "https://vault.infra.durp.info"
path: "kv"
version: "v2"
auth:
kubernetes:
mountPath: "dmz-cluster"
role: "external-secrets"
serviceAccountRef:
name: "vault"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault
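Note: for this ClusterSecretStore to authenticate, the Vault side needs a Kubernetes-auth role matching the names used here (mount dmz-cluster, role external-secrets, service account vault). A hedged sketch of that role; the bound namespace, policy name, and TTL are assumptions:

# Sketch: Vault-side role the ClusterSecretStore above would log in as.
vault write auth/dmz-cluster/role/external-secrets \
  bound_service_account_names=vault \
  bound_service_account_namespaces=external-secrets \
  token_policies=external-secrets \
  token_ttl=1h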

View File

@@ -1,13 +0,0 @@
vault:
global:
enabled: true
tlsDisable: false
externalVaultAddr: "https://vault.infra.durp.info"
resources:
requests:
memory: 256Mi
cpu: 250m
limits:
memory: 256Mi
cpu: 250m

View File

@@ -9,4 +9,4 @@ appVersion: 0.0.1
dependencies:
- name: external-dns
repository: https://charts.bitnami.com/bitnami
version: 8.3.8
version: 6.20.3

View File

@@ -4,10 +4,10 @@ external-dns:
image:
pullPolicy: Always
txtPrefix: "dmz-"
sources:
- service
provider: cloudflare
cloudflare:
secretName : "external-dns"

View File

@@ -8,5 +8,5 @@ appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.10.4
version: 0.8.1

View File

@@ -1,3 +1,20 @@
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: vault
spec:
provider:
vault:
server: "https://vault.internal.prd.durp.info"
path: "secrets"
version: "v2"
auth:
kubernetes:
mountPath: "kubernetes"
role: "dmz-external-secrets"
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
@@ -11,13 +28,6 @@ spec:
data:
- secretKey: cloudflare-api-token-secret
remoteRef:
key: kv/cert-manager
key: secrets/cert-manager
property: cloudflare-api-token-secret
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: gatekeeper
repository: https://open-policy-agent.github.io/gatekeeper/charts
version: 3.17.1
version: 3.14.0

gatekeeper/values.yaml Normal file
View File

@@ -0,0 +1,277 @@
gatekeeper:
replicas: 3
revisionHistoryLimit: 10
auditInterval: 60
metricsBackends: ["prometheus"]
auditMatchKindOnly: false
constraintViolationsLimit: 20
auditFromCache: false
disableMutation: false
disableValidatingWebhook: false
validatingWebhookName: gatekeeper-validating-webhook-configuration
validatingWebhookTimeoutSeconds: 3
validatingWebhookFailurePolicy: Ignore
validatingWebhookAnnotations: {}
validatingWebhookExemptNamespacesLabels: {}
validatingWebhookObjectSelector: {}
validatingWebhookCheckIgnoreFailurePolicy: Fail
validatingWebhookCustomRules: {}
validatingWebhookURL: null
enableDeleteOperations: false
enableExternalData: true
enableGeneratorResourceExpansion: true
enableTLSHealthcheck: false
maxServingThreads: -1
mutatingWebhookName: gatekeeper-mutating-webhook-configuration
mutatingWebhookFailurePolicy: Ignore
mutatingWebhookReinvocationPolicy: Never
mutatingWebhookAnnotations: {}
mutatingWebhookExemptNamespacesLabels: {}
mutatingWebhookObjectSelector: {}
mutatingWebhookTimeoutSeconds: 1
mutatingWebhookCustomRules: {}
mutatingWebhookURL: null
mutationAnnotations: false
auditChunkSize: 500
logLevel: INFO
logDenies: false
logMutations: false
emitAdmissionEvents: false
emitAuditEvents: false
admissionEventsInvolvedNamespace: false
auditEventsInvolvedNamespace: false
resourceQuota: true
externaldataProviderResponseCacheTTL: 3m
image:
repository: openpolicyagent/gatekeeper
crdRepository: openpolicyagent/gatekeeper-crds
release: v3.15.0-beta.0
pullPolicy: Always
pullSecrets: []
preInstall:
crdRepository:
image:
repository: null
tag: v3.15.0-beta.0
postUpgrade:
labelNamespace:
enabled: false
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
extraNamespaces: []
podSecurity: ["pod-security.kubernetes.io/audit=restricted",
"pod-security.kubernetes.io/audit-version=latest",
"pod-security.kubernetes.io/warn=restricted",
"pod-security.kubernetes.io/warn-version=latest",
"pod-security.kubernetes.io/enforce=restricted",
"pod-security.kubernetes.io/enforce-version=v1.24"]
extraAnnotations: {}
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
postInstall:
labelNamespace:
enabled: true
extraRules: []
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
extraNamespaces: []
podSecurity: ["pod-security.kubernetes.io/audit=restricted",
"pod-security.kubernetes.io/audit-version=latest",
"pod-security.kubernetes.io/warn=restricted",
"pod-security.kubernetes.io/warn-version=latest",
"pod-security.kubernetes.io/enforce=restricted",
"pod-security.kubernetes.io/enforce-version=v1.24"]
extraAnnotations: {}
priorityClassName: ""
probeWebhook:
enabled: true
image:
repository: curlimages/curl
tag: 7.83.1
pullPolicy: IfNotPresent
pullSecrets: []
waitTimeout: 60
httpTimeout: 2
insecureHTTPS: false
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
preUninstall:
deleteWebhookConfigurations:
extraRules: []
enabled: false
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podAnnotations: {}
auditPodAnnotations: {}
podLabels: {}
podCountLimit: "100"
secretAnnotations: {}
enableRuntimeDefaultSeccompProfile: true
controllerManager:
exemptNamespaces: []
exemptNamespacePrefixes: []
hostNetwork: false
dnsPolicy: ClusterFirst
port: 8443
metricsPort: 8888
healthPort: 9090
readinessTimeout: 1
livenessTimeout: 1
priorityClassName: system-cluster-critical
disableCertRotation: false
tlsMinVersion: 1.3
clientCertName: ""
strategyType: RollingUpdate
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: gatekeeper.sh/operation
operator: In
values:
- webhook
topologyKey: kubernetes.io/hostname
weight: 100
topologySpreadConstraints: []
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podSecurityContext:
fsGroup: 999
supplementalGroups:
- 999
extraRules: []
networkPolicy:
enabled: false
ingress: { }
# - from:
# - ipBlock:
# cidr: 0.0.0.0/0
audit:
enablePubsub: false
connection: audit-connection
channel: audit-channel
hostNetwork: false
dnsPolicy: ClusterFirst
metricsPort: 8888
healthPort: 9090
readinessTimeout: 1
livenessTimeout: 1
priorityClassName: system-cluster-critical
disableCertRotation: false
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podSecurityContext:
fsGroup: 999
supplementalGroups:
- 999
writeToRAMDisk: false
extraRules: []
crds:
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
pdb:
controllerManager:
minAvailable: 1
service: {}
disabledBuiltins: ["{http.send}"]
psp:
enabled: true
upgradeCRDs:
enabled: true
extraRules: []
priorityClassName: ""
rbac:
create: true
externalCertInjection:
enabled: false
secretName: gatekeeper-webhook-server-cert

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: gitlab-runner
repository: https://charts.gitlab.io/
version: 0.69.0
version: 0.43.0

View File

@@ -6,7 +6,7 @@ gitlab-runner:
imagePullPolicy: Always
gitlabUrl: https://gitlab.com/
unregisterRunner: false
unregisterRunner: true
terminationGracePeriodSeconds: 3600
concurrent: 10
checkInterval: 30
@@ -68,4 +68,4 @@ gitlab-runner:
memory: 2Gi
requests:
memory: 128Mi
cpu: 500m
cpu: 500m

View File

@@ -1,95 +0,0 @@
stages:
- plan
- apply
- destroy
variables:
WORKDIR: $CI_PROJECT_DIR/infra/terraform
GITLAB_TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/infra
image:
name: registry.internal.durp.info/opentofu/opentofu:latest
entrypoint: [""]
.tf-init:
before_script:
- cd $WORKDIR
- tofu init
-reconfigure
-backend-config="address=${GITLAB_TF_ADDRESS}"
-backend-config="lock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="unlock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="username=gitlab-ci-token"
-backend-config="password=${CI_JOB_TOKEN}"
-backend-config="lock_method=POST"
-backend-config="unlock_method=DELETE"
-backend-config="retry_wait_min=5"
format:
stage: .pre
allow_failure: false
script:
- cd $WORKDIR
- tofu fmt -diff -check -write=false
rules:
- changes:
- "infra/terraform/*.tf"
validate:
stage: .pre
allow_failure: false
extends: .tf-init
script:
- tofu validate
rules:
- changes:
- "infra/terraform/*.tf"
plan-infrastructure:
stage: plan
variables:
PLAN: plan.tfplan
JSON_PLAN_FILE: tfplan.json
ENVIRONMENT_NAME: infra
allow_failure: false
extends: .tf-init
script:
- apk add --update curl jq
- alias convert_report="jq -r '([.resource_changes[].change.actions?]|flatten)|{\"create\":(map(select(.==\"create\"))|length),\"update\":(map(select(.==\"update\"))|length),\"delete\":(map(select(.==\"delete\"))|length)}'"
- tofu plan -out=$PLAN $ARGUMENTS
- tofu show --json $PLAN | jq -r '([.resource_changes[].change.actions?]|flatten)|{"create":(map(select(.=="create"))|length),"update":(map(select(.=="update"))|length),"delete":(map(select(.=="delete"))|length)}' > $JSON_PLAN_FILE
artifacts:
reports:
terraform: $WORKDIR/$JSON_PLAN_FILE
needs: ["validate","format"]
rules:
- changes:
- "infra/terraform/*.tf"
apply-infrastructure:
stage: apply
variables:
ENVIRONMENT_NAME: infra
allow_failure: false
extends: .tf-init
script:
- tofu apply -auto-approve $ARGUMENTS
rules:
- changes:
- "infra/terraform/*.tf"
when: manual
needs: ["plan-infrastructure"]
destroy-infrastructure:
stage: destroy
variables:
ENVIRONMENT_NAME: infra
allow_failure: false
extends: .tf-init
script:
- tofu destroy -auto-approve $ARGUMENTS
rules:
- changes:
- "infra/terraform/*.tf"
when: manual
needs: ["plan-infrastructure"]

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: argocd
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 6.11.1

View File

@@ -1,79 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/argocd
destination:
namespace: argocd
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
#apiVersion: external-secrets.io/v1beta1
#kind: ExternalSecret
#metadata:
# name: vault-argocd
# labels:
# app.kubernetes.io/part-of: argocd
#spec:
# secretStoreRef:
# name: vault
# kind: ClusterSecretStore
# target:
# name: client-secret
# data:
# - secretKey: clientSecret
# remoteRef:
# key: secrets/argocd/authentik
# property: clientsecret
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`argocd.infra.durp.info`)
#middlewares:
# - name: whitelist
# namespace: traefik
kind: Rule
services:
- name: argocd-server
port: 443
scheme: https
tls:
secretName: argocd-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: argocd-tls
spec:
secretName: argocd-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "argocd.infra.durp.info"
dnsNames:
- "argocd.infra.durp.info"

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: authentik
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/authentik
destination:
namespace: authentik
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/cert-manager
destination:
namespace: cert-manager
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager-dmz
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/cert-manager
destination:
namespace: cert-manager
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/external-secrets
destination:
namespace: external-secrets
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets-dmz
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/external-secrets
destination:
namespace: external-secrets
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: internal-proxy
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/internalproxy
destination:
namespace: internalproxy
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metallb-system
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/metallb-system
destination:
namespace: metallb-system
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metallb-system-dmz
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/metallb-system
destination:
namespace: metallb-system
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: traefik
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/traefik
destination:
namespace: traefik
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: traefik-dmz
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/traefik
destination:
namespace: traefik
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,53 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vault
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/vault
destination:
namespace: vault
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vault-dmz
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/vault
destination:
namespace: vault
name: dmz
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle

View File

@@ -1,62 +0,0 @@
argo-cd:
global:
revisionHistoryLimit: 1
image:
repository: registry.durp.info/argoproj/argocd
imagePullPolicy: Always
server:
#extraArgs:
# - --dex-server-plaintext
# - --dex-server=argocd-dex-server:5556
# oidc.config: |
# name: AzureAD
# issuer: https://login.microsoftonline.com/TENANT_ID/v2.0
# clientID: CLIENT_ID
# clientSecret: $oidc.azuread.clientSecret
# requestedIDTokenClaims:
# groups:
# essential: true
# requestedScopes:
# - openid
# - profile
# - email
dex:
enabled: true
image:
repository: registry.durp.info/dexidp/dex
imagePullPolicy: Always
configs:
cm:
create: true
annotations: {}
url: https://argocd.internal.durp.info
oidc.tls.insecure.skip.verify: "true"
dex.config: |
connectors:
- config:
issuer: https://authentik.durp.info/application/o/argocd/
clientID: dbb8ffc06104fb6e7fac3e4ae7fafb1d90437625
clientSecret: $client-secret:clientSecret
insecureEnableGroups: true
scopes:
- openid
- profile
- email
- groups
name: authentik
type: oidc
id: authentik
rbac:
create: true
policy.csv: |
g, ArgoCD Admins, role:admin
scopes: "[groups]"
server:
route:
enabled: false

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: authentik
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: authentik
repository: https://charts.goauthentik.io
version: 2024.8.3

View File

@@ -1,31 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: authentik-server
port: 80
tls:
secretName: authentik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: authentik-tls
spec:
secretName: authentik-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "authentik.durp.info"
dnsNames:
- "authentik.durp.info"

View File

@@ -1,35 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: authentik-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: db-pass
data:
- secretKey: dbpass
remoteRef:
key: kv/authentik/database
property: dbpass
- secretKey: secretkey
remoteRef:
key: kv/authentik/database
property: secretkey
- secretKey: postgresql-postgres-password
remoteRef:
key: kv/authentik/database
property: dbpass
- secretKey: postgresql-password
remoteRef:
key: kv/authentik/database
property: dbpass
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,56 +0,0 @@
authentik:
global:
env:
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: db-pass
key: dbpass
- name: AUTHENTIK_SECRET_KEY
valueFrom:
secretKeyRef:
name: db-pass
key: secretkey
revisionHistoryLimit: 1
image:
repository: registry.durp.info/goauthentik/server
pullPolicy: Always
authentik:
outposts:
container_image_base: registry.durp.info/goauthentik/%(type)s:%(version)s
postgresql:
host: '{{ .Release.Name }}-postgresql-hl'
name: "authentik"
user: "authentik"
port: 5432
server:
name: server
replicas: 3
worker:
replicas: 3
postgresql:
enabled: true
image:
registry: registry.durp.info
repository: bitnami/postgresql
pullPolicy: Always
postgresqlUsername: "authentik"
postgresqlDatabase: "authentik"
existingSecret: db-pass
persistence:
enabled: true
storageClass: longhorn
accessModes:
- ReadWriteMany
redis:
enabled: true
master:
persistence:
enabled: false
image:
registry: registry.durp.info
repository: bitnami/redis
pullPolicy: Always
architecture: standalone
auth:
enabled: false

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: cert-manager
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.16.3

View File

@@ -1,6 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: issuer
secrets:
- name: issuer-token-lmzpj

File diff suppressed because one or more lines are too long

View File

@@ -1,26 +0,0 @@
cert-manager:
crds:
enabled: true
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-controller
pullPolicy: Always
replicaCount: 3
#extraArgs:
# - --dns01-recursive-nameservers=1.1.1.1:53,1.0.0.1:53
# - --dns01-recursive-nameservers-only
#podDnsPolicy: None
#podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "1.0.0.1"
webhook:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-webhook
pullPolicy: Always
cainjector:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-cainjector
pullPolicy: Always

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: external-secrets
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.13.0

View File

@@ -1,81 +0,0 @@
apiVersion: v1
data:
vault.pem: |
-----BEGIN CERTIFICATE-----
MIIEszCCA5ugAwIBAgIUZEzzxqEuYiKHkL1df+Cb22NRRJMwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzIyMzQ0MloXDTM1MDEy
MTExMTU1NVowIDEeMBwGA1UEAxMVdmF1bHQuaW5mcmEuZHVycC5pbmZvMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkZM0ue4bMcmmATs+kGYSpR2hLUzq
scGIwCtqmaKCMbd1xhmgjnIR3zvSRptLR2GVGvc1ti6qby0jXYvcqbxkHvay00zW
2zYN+M2m4lXpuWzg1t6NEoO6XGAsGj2v0vcVktPPU9uj0rGUVGWWfsvjoXqQFg5I
jdxsxK9SvMvw2XtE3FgKxpzCyw94InIHlcPwFTO+3ZdKStZlMbUDIkmszLBrWFcr
XOsPDfLxqMy0Ck//LKIt8djh3254FHB1GG5+kI+JSW1o+tUcL2NymvIINwm/2acS
1uTm+j9W7iEXav0pJNmm+/dzSskc3Y0ftM0h2HCXgitBIaEZnUVneNHOLwIDAQAB
o4IB7zCCAeswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFCaQ2q7j7LyBGETEZ5qaJAdlISKCMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2
dfBMWVYeWrQ2MIH0BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6
Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFo
dHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3Nw
MDEGCCsGAQUFBzAChiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtp
L2NhMDsGCCsGAQUFBzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVy
cC5pbmZvL3YxL3BraS9jYTAgBgNVHREEGTAXghV2YXVsdC5pbmZyYS5kdXJwLmlu
Zm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAx
L3YxL3BraS9jcmwwNqA0oDKGMGh0dHBzOi8vcm9vdC12YXVsdC5pbnRlcm5hbC5k
dXJwLmluZm8vdjEvcGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAuJ+lplY/+A5L
5LzkljbKDTy3U6PLv1LtxqVCOFGiJXBnXMjtVW07bBEUadzFRNW8GHQ3w5QzOG6k
/vE/TrrJho7l05J/uc+BUrPSNjefLmQV6hn4jrP86PR0vzRfbSqKKBIID9M7+zi6
GFvHlVkSHsQyMQp7JOoax9KVzW2Y+OIgw7Lgw2tP122WCt2SIF0QenoZHsoW0guj
tzTJRmJDjn6XeJ7L3FPkf37H6ub0Jg3zBGr6eorEFfYZNN5CXezjqMFBpRdq4UIo
1M3A7o3uyZFcFsp/vGDcMBkwaCsBV9idu/HwkvGaTUNI285ilBORPD0bMZnACq/9
+Q/cdsO5lg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEmzCCA4OgAwIBAgIUQwCAs82sgSuiaVbjANHScO2DSfAwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMjEyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAn9fjGRqqFsqguz56X6cXZwEMtD9wElwSFCb4Fc8YTzlH
4fV13QwXKESLE/Q+7bw4y4FJQ8BiGNbxxbQOOgWhfGGlQyFa1lfhJtYLfqRN5C2/
S7nr0YxDB9duc4OAExVL6Pr4/Koc+vDZY03l7RzwnF2AOM9DjFTASw01TphCQjRk
U+upiN2TUhUPejV/gMR+zXM6pn98UBKG1dNubS0HzAMwAEXAPm141NDyWUCPT9+3
6P03Ka8mUTx3X49OCtvJEGEQbtlnTFQaOSkP1yLW+XRMHw3sQaV2PWXu5fInbEpZ
+SuzmgLOXtmQNmHLav9q1qeTVkpBGPWvfh2Vh1JJhQIDAQABo4IB4zCCAd8wDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJaP17f1Zw0V
55Ks9Uf0USVWl0BPMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2dfBMWVYeWrQ2MIH0
BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6Ly8xOTIuMTY4LjIw
LjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFodHRwczovL3Jvb3Qt
dmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3NwMDEGCCsGAQUFBzAC
hiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtpL2NhMDsGCCsGAQUF
BzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3Br
aS9jYTAUBgNVHREEDTALgglkdXJwLmluZm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0
cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9jcmwwNqA0oDKGMGh0dHBz
Oi8vcm9vdC12YXVsdC5pbnRlcm5hbC5kdXJwLmluZm8vdjEvcGtpL2NybDANBgkq
hkiG9w0BAQsFAAOCAQEAiqAZ4zNIEkCWcvpDRq0VyJuk59sVtJr5X4FscHQ179nE
QbbvMe+EBDFS6XQml1Elj8jiPa/D5O9Oc6Iisnm5+weZKwApz/lQ+XVkWLCoEplB
ZZ9fcWVCbMLt0xlt8qn5z/mYKfbCT7ZCqDO+prQZt+ADJcQbiknfroAAqEbNKxwN
Y9uUyOWNF3SxJEch4w2dtX+IEVmxeZnhMy8OuP0SQKl8aW40ugiG0ZD5yTBBfOD9
zsrGSU/iSatn0b7bevBhaL96hz1/rNR1cL+4/albX2hrr8Rv3/SB2DLtNQlQW0ls
AfhXAqP5zL+Ytgf1Of/pVdgnhxrYUY7RKCSGY5Hagw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDLzCCAhegAwIBAgIUNHdvOzam2HPVdwXpMHUy4wl8ZRYwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMTUyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA8XDTVEtRI3+k4yuvqVqfIiLRQJcXbmhfVtAeYk+5j9Ox
p1w9YHdnPLqLFrD1PzadjqYeAp/fwlEFfs6lqwoTS8S9vhaFqcgB57nVMb77dTBb
/08XHXOU6FPRjdFKm5QMpS7tn1XacPMy/o0bKqRREQeiuFDGVRyuF5PUgvWc1dvJ
l27JvvgYktgjfpNS4DlCxg4lGXT5abvaKf2hnr65egaIo/yRWN9wnvAzRiY7oci7
GA1oKz87Yc1tfL2gcynrwccOOCF/eUKesJR1I6GXNkN/a1fcr+Ld9Z9NhHBtO+vE
N8DsZY+kG7DE3M4BCCTFUzllcYHjaW4HaF9vZW+PYwIDAQABo3kwdzAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7WMLIaSkU75CJHZ1
8ExZVh5atDYwHwYDVR0jBBgwFoAU7WMLIaSkU75CJHZ18ExZVh5atDYwFAYDVR0R
BA0wC4IJZHVycC5pbmZvMA0GCSqGSIb3DQEBCwUAA4IBAQAS/qUI/1Yv07xUTK5k
r93kC7GSPpmpkXIsfjChAl93sebN143fu70NUP74jjCc0Wkb8hRofGg10E+/24r1
AI0KsLhzKzfIASxUVQAn8RTptLruaaPLboSA4MUZ8IB5y8Vy8E3/KtD0gD80j64Y
rm9XGHA0HTJHbPUTb/Rux2g0E7WtiyWSWH8mqzbegU8IrkM3eVT4+ylBE7YkfWDD
dw44sB71tfmDKpzWg6XQ6YMh0YfnyG1fYCj9LhuecNY9Uuo6cjDaAvkzMewWwqDx
Q2Ekas98Di6itCP8vET+gBDjeCc+XR6Hx6vzWmxlZhwDuxEKL1a2/DabUxJyMNzv
55Fn
-----END CERTIFICATE-----
kind: ConfigMap
metadata:
name: ca-pemstore

View File

@@ -1,70 +0,0 @@
external-secrets:
replicaCount: 3
revisionHistoryLimit: 1
leaderElect: true
installCRDs: true
crds:
createClusterExternalSecret: true
createClusterSecretStore: true
createClusterGenerator: true
createPushSecret: true
conversion:
enabled: false
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
webhook:
log:
level: debug
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
certController:
create: false
revisionHistoryLimit: 1
log:
level: debug
image:
repository: registry.internal.durp.info/external-secrets/external-secrets
pullPolicy: Always
tag: ""
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m

View File

@@ -1,32 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: longhorn-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`longhorn.infra.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: longhorn-frontend
port: 80
tls:
secretName: longhorn-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: longhorn-tls
spec:
secretName: longhorn-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "longhorn.infra.durp.info"
dnsNames:
- "longhorn.infra.durp.info"

View File

@@ -1,30 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: external-longhorn-backup-token-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: longhorn-backup-token-secret
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: kv/longhorn/backup
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_ENDPOINTS
remoteRef:
key: kv/longhorn/backup
property: AWS_ENDPOINTS
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: kv/longhorn/backup
property: AWS_SECRET_ACCESS_KEY

View File

@@ -1,192 +0,0 @@
longhorn:
global:
cattle:
systemDefaultRegistry: ""
image:
longhorn:
engine:
repository: longhornio/longhorn-engine
manager:
repository: longhornio/longhorn-manager
ui:
repository: longhornio/longhorn-ui
instanceManager:
repository: longhornio/longhorn-instance-manager
shareManager:
repository: longhornio/longhorn-share-manager
backingImageManager:
repository: longhornio/backing-image-manager
csi:
attacher:
repository: longhornio/csi-attacher
provisioner:
repository: longhornio/csi-provisioner
nodeDriverRegistrar:
repository: longhornio/csi-node-driver-registrar
resizer:
repository: longhornio/csi-resizer
snapshotter:
repository: longhornio/csi-snapshotter
pullPolicy: Always
service:
ui:
type: ClusterIP
nodePort: null
manager:
type: ClusterIP
nodePort: ""
loadBalancerIP: ""
loadBalancerSourceRanges: ""
persistence:
defaultClass: true
defaultFsType: ext4
defaultClassReplicaCount: 3
defaultDataLocality: disabled # alternative value: best-effort
reclaimPolicy: Delete
migratable: false
recurringJobSelector:
enable: true
jobList: '[
{
"name":"backup",
"task":"backup",
"cron":"0 0 * * *",
"retain":24
}
]'
backingImage:
enable: false
name: ~
dataSourceType: ~
dataSourceParameters: ~
expectedChecksum: ~
csi:
kubeletRootDir: ~
attacherReplicaCount: ~
provisionerReplicaCount: ~
resizerReplicaCount: ~
snapshotterReplicaCount: ~
defaultSettings:
backupTarget: s3://longhorn-master@us-east-1/
backupTargetCredentialSecret: longhorn-backup-token-secret
allowRecurringJobWhileVolumeDetached: ~
createDefaultDiskLabeledNodes: ~
defaultDataPath: ~
defaultDataLocality: ~
replicaSoftAntiAffinity: ~
replicaAutoBalance: ~
storageOverProvisioningPercentage: ~
storageMinimalAvailablePercentage: ~
upgradeChecker: ~
defaultReplicaCount: ~
defaultLonghornStaticStorageClass: longhorn
backupstorePollInterval: ~
taintToleration: ~
systemManagedComponentsNodeSelector: ~
priorityClass: ~
autoSalvage: ~
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
disableSchedulingOnCordonedNode: ~
replicaZoneSoftAntiAffinity: ~
nodeDownPodDeletionPolicy: ~
allowNodeDrainWithLastHealthyReplica: ~
mkfsExt4Parameters: ~
disableReplicaRebuild: ~
replicaReplenishmentWaitInterval: ~
concurrentReplicaRebuildPerNodeLimit: ~
disableRevisionCounter: ~
systemManagedPodsImagePullPolicy: ~
allowVolumeCreationWithDegradedAvailability: ~
autoCleanupSystemGeneratedSnapshot: ~
concurrentAutomaticEngineUpgradePerNodeLimit: ~
backingImageCleanupWaitInterval: ~
backingImageRecoveryWaitInterval: ~
guaranteedEngineManagerCPU: ~
guaranteedReplicaManagerCPU: ~
kubernetesClusterAutoscalerEnabled: ~
orphanAutoDeletion: ~
storageNetwork: ~
privateRegistry:
createSecret: ~
registryUrl: ~
registryUser: ~
registryPasswd: ~
registrySecret: ~
longhornManager:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornDriver:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chance that the chart runs in environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
#
ingress:
enabled: false
## Specify an override namespace; this is useful when Longhorn is deployed as a sub-chart
## and its release namespace is not `longhorn-system`.
namespaceOverride: ""
# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
annotations: {}
serviceAccount:
# Annotations to add to the service account
annotations: {}

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: metallb-system
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: metallb
repository: https://metallb.github.io/metallb
version: 0.14.9

View File

@@ -1,17 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cheap
spec:
addresses:
- 192.168.12.130-192.168.12.140
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: pool
namespace: metallb-system
spec:
ipAddressPools:
- cheap

View File

@@ -1,116 +0,0 @@
resource "proxmox_vm_qemu" "k3smaster" {
lifecycle {
prevent_destroy = true
}
count = local.k3smaster.count
ciuser = "administrator"
name = local.k3smaster.name[count.index]
target_node = local.k3smaster.node[count.index]
tags = local.k3smaster.tags
full_clone = false
qemu_os = "l26"
os_type = "cloud-init"
agent = 1
cores = local.k3smaster.cores
sockets = 1
cpu_type = "host"
memory = local.k3smaster.memory
scsihw = "virtio-scsi-pci"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3smaster.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
# Cloud-init settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3smaster.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "k3sserver" {
lifecycle {
prevent_destroy = true
}
count = local.k3sserver.count
ciuser = "administrator"
name = local.k3sserver.name[count.index]
target_node = local.k3sserver.node[count.index]
tags = local.k3sserver.tags
qemu_os = "l26"
full_clone = false
os_type = "cloud-init"
agent = 1
cores = local.k3sserver.cores
sockets = 1
cpu_type = "host"
memory = local.k3sserver.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3sserver.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
# Cloud-init settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3sserver.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View File

@@ -1,78 +0,0 @@
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc6"
}
}
}
provider "proxmox" {
pm_parallel = 1
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
locals {
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEphzWgwUZnvL6E5luKLt3WO0HK7Kh63arSMoNl5gmjzXyhG1DDW0OKfoIl0T+JZw/ZjQ7iii6tmSLFRk6nuYCldqe5GVcFxvTzX4/xGEioAyG0IiUGKy6s+9xzO8QXF0EtSNPH0nfHNKcCjgwWAzM+Lt6gW0Vqs+aU5ICuDiEchmvYPz+rBaVldJVTG7m3ogKJ2aIF7HU/pCPp5l0E9gMOw7s0ABijuc3KXLEWCYgL39jIST6pFH9ceRLmu8Xy5zXHAkkEEauY/e6ld0hlzLadiUD7zYJMdDcm0oRvenYcUlaUl9gS0569IpfsJsjCejuqOxCKzTHPJDOT0f9TbIqPXkGq3s9oEJGpQW+Z8g41BqRpjBCdBk+yv39bzKxlwlumDwqgx1WP8xxKavAWYNqNRG7sBhoWwtxYEOhKXoLNjBaeDRnO5OY5AQJvONWpuByyz0R/gTh4bOFVD+Y8WWlKbT4zfhnN70XvapRsbZiaGhJBPwByAMGg6XxSbC6xtbyligVGCEjCXbTLkeKq1w0DuItY+FBGO3J2k90OiciTVSeyiVz9J/Y03UB0gHdsMCoVNrj+9QWfrTLDhM7D5YrXUt5nj2LQTcbtf49zoQXWxUhozlg42E/FJU/Yla7y55qWizAEVyP2/Ks/PHrF679k59HNd2IJ/aicA9QnmWtLQ== ansible"
template = "Debian12-Template"
storage = "cache-domains"
emulatessd = true
format = "raw"
dnsserver = "192.168.12.1"
vlan = 12
k3smaster = {
tags = "k3s_infra"
count = 3
name = ["master01-infra", "master02-infra", "master03-infra"]
cores = 2
memory = "4096"
drive = 20
node = ["mothership", "overlord", "vanguard"]
ip = ["11", "12", "13"]
}
k3sserver = {
tags = "k3s_infra"
count = 3
name = ["node01-infra", "node02-infra", "node03-infra"]
cores = 4
memory = "8192"
drive = 240
node = ["mothership", "overlord", "vanguard"]
ip = ["21", "22", "23"]
}
haproxy = {
tags = "haproxy"
count = 3
name = ["haproxy-01", "haproxy-02", "haproxy-03"]
cores = 2
memory = "1024"
drive = 20
node = ["mothership", "overlord", "vanguard"]
ip = ["31", "32", "33"]
}
postgres = {
tags = "postgres"
count = 3
name = ["postgres-01", "postgres-02", "postgres-03"]
cores = 4
memory = "4096"
drive = 40
node = ["mothership", "overlord", "vanguard"]
ip = ["34", "35", "36"]
}
pihole = {
tags = "pihole"
count = 3
name = ["pihole-01", "pihole-02", "pihole-03"]
cores = 2
memory = "2048"
drive = 20
node = ["mothership", "overlord", "vanguard"]
ip = ["41", "42", "43"]
}
}

View File

@@ -1,57 +0,0 @@
resource "proxmox_vm_qemu" "pihole" {
count = local.pihole.count
ciuser = "administrator"
vmid = "${local.vlan}${local.pihole.ip[count.index]}"
name = local.pihole.name[count.index]
target_node = local.pihole.node[count.index]
clone = local.template
tags = local.pihole.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.pihole.cores
sockets = 1
cpu_type = "host"
memory = local.pihole.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.pihole.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
# Cloud-init settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.pihole.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View File

@@ -1,115 +0,0 @@
resource "proxmox_vm_qemu" "haproxy" {
count = local.haproxy.count
ciuser = "administrator"
vmid = "${local.vlan}${local.haproxy.ip[count.index]}"
name = local.haproxy.name[count.index]
target_node = local.haproxy.node[count.index]
clone = local.template
tags = local.haproxy.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.haproxy.cores
sockets = 1
cpu_type = "host"
memory = local.haproxy.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.haproxy.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
# Cloud-init settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.haproxy.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "postgres" {
count = local.postgres.count
ciuser = "administrator"
vmid = "${local.vlan}${local.postgres.ip[count.index]}"
name = local.postgres.name[count.index]
target_node = local.postgres.node[count.index]
clone = local.template
tags = local.postgres.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.postgres.cores
sockets = 1
cpu_type = "host"
memory = local.postgres.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.postgres.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
# Cloud-init settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.postgres.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View File

@@ -1,14 +0,0 @@
variable "pm_api_url" {
description = "API URL to Proxmox provider"
type = string
}
variable "pm_password" {
description = "Passowrd to Proxmox provider"
type = string
}
variable "pm_user" {
description = "UIsername to Proxmox provider"
type = string
}

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: traefik
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: traefik
repository: https://traefik.github.io/charts
version: 34.0.0

View File

@@ -1,47 +0,0 @@
traefik:
image:
registry: registry.durp.info
repository: traefik
pullPolicy: Always
deployment:
replicas: 3
revisionHistoryLimit: 1
ingressRoute:
dashboard:
enabled: true
additionalArguments:
- "--serversTransport.insecureSkipVerify=true"
- "--log.level=DEBUG"
- --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin
- --experimental.plugins.jwt.version=v0.7.0
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Pods
value: 1
periodSeconds: 60
# -- [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for `traefik` container.
resources:
requests:
cpu: "100m"
memory: "512Mi"
limits:
memory: "512Mi"

Some files were not shown because too many files have changed in this diff.