18 Commits

Author SHA1 Message Date
d38a2d2840 update 2024-07-21 09:49:15 -05:00
2b88107a28 Merge branch 'mr-prd' into 'prd'
Mr prd

See merge request developerdurp/homelab!2
2024-07-21 12:58:33 +00:00
d9be744f17 Mr prd 2024-07-21 12:58:33 +00:00
b1a4779d96 update 2024-07-21 07:41:04 -05:00
54432a447f update 2024-07-21 07:39:08 -05:00
779d3448d5 update 2024-07-21 07:33:18 -05:00
7d9ddc574f update 2024-07-21 06:57:15 -05:00
0b4238217b update ingress 2024-07-21 06:50:32 -05:00
39020882f4 update 2024-07-21 06:28:57 -05:00
39f53751ea update 2024-07-21 06:27:08 -05:00
d686567857 update 2024-07-21 06:25:13 -05:00
74bca2946b update 2024-07-21 06:23:44 -05:00
1972f4a965 add metallb 2024-07-21 06:21:02 -05:00
b4f11ea722 update for ollama url 2024-07-20 09:22:12 -05:00
004990a4ca update 2024-06-23 12:15:12 -05:00
1481dbe107 Update Chart 2024-05-29 11:29:48 +00:00
0414919e05 update 2024-05-25 07:30:32 -05:00
d5ab5c4671 move all to prd 2024-05-11 06:34:17 -05:00
276 changed files with 915 additions and 8843 deletions

.gitignore vendored
View File

@@ -1,3 +1 @@
.idea
infra/terraform/.terraform
infra/terraform/.terraform.lock.hcl

View File

@@ -1,3 +0,0 @@
include:
- local: infra/.gitlab/.gitlab-ci.yml
- local: dmz/.gitlab/.gitlab-ci.yml

View File

@@ -1,4 +0,0 @@
VAULT_HELM_SECRET_NAME=$(kubectl get secrets -n vault --output=json | jq -r '.items[].metadata | select(.name|startswith("vault-token-")).name')
TOKEN_REVIEW_JWT=$(kubectl get secret $VAULT_HELM_SECRET_NAME -n vault --output='go-template={{ .data.token }}' | base64 --decode)
KUBE_CA_CERT=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.certificate-authority-data}' | base64 --decode)
KUBE_HOST=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.server}')
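These four variables line up with the parameters of Vault's Kubernetes auth method. As a hedged sketch (not part of this repo, and assuming the Vault CLI is already authenticated and the kubernetes auth method is enabled), they would typically be consumed like this:

# feed the collected values into Vault's Kubernetes auth backend
vault write auth/kubernetes/config \
    token_reviewer_jwt="$TOKEN_REVIEW_JWT" \
    kubernetes_host="$KUBE_HOST" \
    kubernetes_ca_cert="$KUBE_CA_CERT"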

View File

@@ -1,5 +0,0 @@
- hosts: all
gather_facts: yes
become: yes
roles:
- base

View File

@@ -1,4 +0,0 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::AutocleanInterval "7";
APT::Periodic::Unattended-Upgrade "1";

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGhPVgL8gXdRTw0E2FvlOUoUI4vd794nB0nZVIsc+U5M

View File

@@ -1,4 +0,0 @@
Use of this system is restricted to authorized users only, and all use is subject to an acceptable use policy.
IF YOU ARE NOT AUTHORIZED TO USE THIS SYSTEM, DISCONNECT NOW.

View File

@@ -1,4 +0,0 @@
THIS SYSTEM IS FOR AUTHORIZED USE ONLY
All activities are logged and monitored.

View File

@@ -1,95 +0,0 @@
# Package generated configuration file
# See the sshd_config(5) manpage for details
# What ports, IPs and protocols we listen for
Port 22
# Use these options to restrict which interfaces/protocols sshd will bind to
#ListenAddress ::
#ListenAddress 0.0.0.0
Protocol 2
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_dsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
#Privilege Separation is turned on for security
UsePrivilegeSeparation yes
# Lifetime and size of ephemeral version 1 server key
KeyRegenerationInterval 3600
ServerKeyBits 1024
# Logging
SyslogFacility AUTH
LogLevel INFO
# Authentication:
LoginGraceTime 120
PermitRootLogin no
StrictModes yes
RSAAuthentication yes
PubkeyAuthentication yes
#AuthorizedKeysFile %h/.ssh/authorized_keys
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# For this to work you will also need host keys in /etc/ssh_known_hosts
RhostsRSAAuthentication no
# similar for protocol version 2
HostbasedAuthentication no
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
#IgnoreUserKnownHosts yes
# To enable empty passwords, change to yes (NOT RECOMMENDED)
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
X11Forwarding no
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no
#MaxStartups 10:30:60
#Banner /etc/issue.net
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
ClientAliveInterval 300
#enable remote powershell
#Subsystem powershell /usr/bin/pwsh -sshs -NoLogo
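Not from this repo, but a quick way to sanity-check a secured sshd_config like the one above before the daemon is restarted (assumes it is run on the target host):

# test mode: parse the config and report errors without reloading sshd
sshd -t -f /etc/ssh/sshd_config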

View File

@@ -1,143 +0,0 @@
- name: Update packages
apt:
name: '*'
state: latest
update_cache: yes
only_upgrade: yes
retries: 300
delay: 10
- name: Remove packages not needed anymore
apt:
autoremove: yes
retries: 300
delay: 10
- name: Install required packages Debian
apt:
state: latest
pkg: "{{ item }}"
with_items: "{{ required_packages }}"
retries: 300
delay: 10
- name: Create user account
user:
name: "user"
shell: /bin/bash
state: present
createhome: yes
- name: ensure ssh folder exists for user
file:
path: /home/user/.ssh
owner: user
group: user
mode: "0700"
state: directory
- name: Deploy SSH Key (user)
copy:
dest: /home/user/.ssh/authorized_keys
src: files/authorized_keys_user
owner: user
group: user
force: true
- name: Remove Root SSH Configuration
file:
path: /root/.ssh
state: absent
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "Debian"
- name: Copy Secured SSHD Configuration
copy:
src: files/sshd_config_secured_redhat
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
when: ansible_os_family == "RedHat"
- name: Restart SSHD
systemd:
name: sshd
daemon_reload: yes
state: restarted
enabled: yes
ignore_errors: yes
- name: Copy unattended-upgrades file
copy:
src: files/10periodic
dest: /etc/apt/apt.conf.d/10periodic
owner: root
group: root
mode: "0644"
force: yes
when: ansible_os_family == "Debian"
- name: Remove undesirable packages
package:
name: "{{ unnecessary_software }}"
state: absent
when: ansible_os_family == "Debian"
- name: Stop and disable unnecessary services
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items: "{{ unnecessary_services }}"
ignore_errors: yes
- name: Set a message of the day
copy:
dest: /etc/motd
src: files/motd
owner: root
group: root
mode: 0644
- name: Set a login banner
copy:
dest: "{{ item }}"
src: files/issue
owner: root
group: root
mode: 0644
with_items:
- /etc/issue
- /etc/issue.net
- name: set timezone
shell: timedatectl set-timezone America/Chicago
- name: Enable cockpit
systemd:
name: cockpit
daemon_reload: yes
state: restarted
enabled: yes
- name: change password
ansible.builtin.user:
name: "user"
state: present
password: "{{ lookup('ansible.builtin.env', 'USER_PASSWORD') | password_hash('sha512') }}"
- name: add user to sudoers
community.general.sudoers:
name: user
state: present
user: user
commands: ALL

View File

@@ -1,17 +0,0 @@
required_packages:
- ufw
- qemu-guest-agent
- fail2ban
- unattended-upgrades
- cockpit
- nfs-common
- open-iscsi
unnecessary_services:
- postfix
- telnet
unnecessary_software:
- tcpdump
- nmap-ncat
- wpa_supplicant

View File

@@ -9,6 +9,6 @@ appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 6.11.1
version: 6.7.11

View File

@@ -1,16 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: litellm
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/litellm
targetRevision: prd
path: argocd
destination:
namespace: litellm
namespace: argocd
name: in-cluster
syncPolicy:
automated:

View File

@@ -0,0 +1,21 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: authentik
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: prd
path: authentik
destination:
namespace: authentik
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/bitwarden
targetRevision: prd
path: bitwarden
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/cert-manager
targetRevision: prd
path: cert-manager
destination:
namespace: cert-manager
name: in-cluster

View File

@@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: prd
path: crossplane
destination:
namespace: crossplane
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpapi
targetRevision: prd
path: durpapi
destination:
namespace: durpapi
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpot
targetRevision: prd
path: durpot
destination:
namespace: durpot
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-dns
targetRevision: prd
path: external-dns
destination:
namespace: external-dns
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-secrets
targetRevision: prd
path: external-secrets
destination:
namespace: external-secrets
name: in-cluster

View File

@@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gatekeeper
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: prd
path: gatekeeper
destination:
namespace: gatekeeper
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,21 +1,21 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gitlab-runner-dmz
name: gitlab-runner
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: dmz/gitlab-runner
targetRevision: prd
path: gitlab-runner
destination:
namespace: gitlab-runner
name: dmz
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- CreateNamespace=true

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/heimdall
targetRevision: prd
path: heimdall
destination:
namespace: heimdall
name: in-cluster

View File

@@ -0,0 +1,36 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`argocd.internal.prd.durp.info`)
middlewares:
- name: internal-only
namespace: traefik
kind: Rule
services:
- name: argocd-server
port: 443
scheme: https
tls:
secretName: argocd-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: argocd-tls
spec:
secretName: argocd-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "argocd.internal.prd.durp.info"
dnsNames:
- "argocd.internal.prd.durp.info"

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/krakend
targetRevision: prd
path: krakend
destination:
namespace: krakend
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kube-prometheus-stack
targetRevision: prd
path: kube-prometheus-stack
destination:
namespace: kube-prometheus-stack
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kubeclarity
targetRevision: prd
path: kubeclarity
destination:
namespace: kubeclarity
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/littlelink
targetRevision: prd
path: littlelink
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/longhorn
targetRevision: prd
path: longhorn
destination:
namespace: longhorn-system
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/metallb-system
targetRevision: prd
path: metallb-system
destination:
namespace: metallb-system
name: in-cluster
@@ -19,4 +19,3 @@ spec:
syncOptions:
- CreateNamespace=true

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/nfs-client
targetRevision: prd
path: nfs-client
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/open-webui
targetRevision: prd
path: open-webui
destination:
namespace: open-webui
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/traefik
targetRevision: prd
path: traefik
destination:
namespace: traefik
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/uptimekuma
targetRevision: prd
path: uptimekuma
directory:
recurse: true
destination:

View File

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vault
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: prd
path: vault
destination:
namespace: vault
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle

View File

@@ -33,13 +33,13 @@ argo-cd:
cm:
create: true
annotations: {}
url: https://argocd.internal.durp.info
url: https://argocd.internal.prd.durp.info
oidc.tls.insecure.skip.verify: "true"
dex.config: |
connectors:
- config:
issuer: https://authentik.durp.info/application/o/argocd/
clientID: dbb8ffc06104fb6e7fac3e4ae7fafb1d90437625
issuer: https://authentik.prd.durp.info/application/o/argocd/
clientID: lKuMgyYaOlQMNAUSjsRVYgkwZG9UT6CeFWeTLAcl
clientSecret: $client-secret:clientSecret
insecureEnableGroups: true
scopes:

View File

@@ -9,4 +9,4 @@ appVersion: "1.16.0"
dependencies:
- name: authentik
repository: https://charts.goauthentik.io
version: 2024.8.3
version: 2024.4.1

View File

@@ -1,4 +1,4 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
@@ -6,11 +6,11 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
- match: Host(`authentik.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: infra-cluster
port: 443
- name: authentik-server
port: 80
tls:
secretName: authentik-tls
@@ -21,13 +21,13 @@ kind: Certificate
metadata:
name: authentik-tls
spec:
secretName: authentik-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: authentik-tls
commonName: "authentik.durp.info"
commonName: "authentik.prd.durp.info"
dnsNames:
- "authentik.durp.info"
- "authentik.prd.durp.info"
---
@@ -36,7 +36,7 @@ apiVersion: v1
metadata:
name: authentik-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: authentik.durp.info
external-dns.alpha.kubernetes.io/hostname: authentik.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName: prd.durp.info

View File

@@ -1,6 +1,8 @@
authentik:
global:
env:
- name: AUTHENTIK_REDIS__DB
value: "1"
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
@@ -13,11 +15,11 @@ authentik:
key: secretkey
revisionHistoryLimit: 1
image:
repository: registry.durp.info/goauthentik/server
repository: registry.internal.durp.info/goauthentik/server
pullPolicy: Always
authentik:
outposts:
container_image_base: registry.durp.info/goauthentik/%(type)s:%(version)s
container_image_base: registry.internal.durp.info/goauthentik/%(type)s:%(version)s
postgresql:
host: '{{ .Release.Name }}-postgresql-hl'
name: "authentik"
@@ -26,30 +28,31 @@ authentik:
server:
name: server
replicas: 3
worker:
replicas: 3
postgresql:
enabled: true
image:
registry: registry.durp.info
registry: registry.internal.durp.info
repository: bitnami/postgresql
pullPolicy: Always
postgresqlUsername: "authentik"
postgresqlDatabase: "authentik"
existingSecret: db-pass
auth:
username: "authentik"
existingSecret: db-pass
secretKeys:
adminPasswordKey: dbpass
userPasswordKey: dbpass
#postgresqlUsername: "authentik"
#postgresqlDatabase: "authentik"
#existingSecret: db-pass
persistence:
enabled: true
storageClass: longhorn
size: 16Gi
accessModes:
- ReadWriteMany
redis:
enabled: true
master:
persistence:
enabled: false
image:
registry: registry.durp.info
registry: registry.internal.durp.info
repository: bitnami/redis
pullPolicy: Always
architecture: standalone

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: bitwarden
image: registry.internal.durp.info/vaultwarden/server:1.32.7
image: registry.internal.durp.info/vaultwarden/server:1.30.5
imagePullPolicy: Always
volumeMounts:
- name: bitwarden-pvc
@@ -28,7 +28,7 @@ spec:
containerPort: 80
env:
- name: SIGNUPS_ALLOWED
value: "FALSE"
value: "TRUE"
- name: INVITATIONS_ALLOWED
value: "FALSE"
- name: WEBSOCKET_ENABLED
@@ -39,7 +39,7 @@ spec:
value: "80"
- name: ROCKET_WORKERS
value: "10"
- name: SECRET_USERNAME
- name: ADMIN_TOKEN
valueFrom:
secretKeyRef:
name: bitwarden-secret

View File

@@ -0,0 +1,63 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: bitwarden
port: 80
tls:
secretName: bitwarden-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: bitwarden-tls
spec:
secretName: bitwarden-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "bitwarden.prd.durp.info"
dnsNames:
- "bitwarden.prd.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: bitwarden-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: bitwarden.prd.durp.info
spec:
type: ExternalName
externalName: prd.durp.info
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-admin-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.prd.durp.info`) && PathPrefix(`/admin`)
kind: Rule
middlewares:
- name: whitelist
namespace: traefik
services:
- name: bitwarden
port: 80
tls:
secretName: bitwarden-tls

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.15.3
version: 1.*.*

View File

@@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-cluster-issuer
spec:
selfSigned: {}

View File

@@ -9,4 +9,4 @@ appVersion: "1.16.0"
dependencies:
- name: crossplane
repository: https://charts.crossplane.io/stable
version: 1.17.1
version: 1.16.0

View File

@@ -3,7 +3,7 @@ kind: Provider
metadata:
name: provider-gitlab
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-gitlab:v0.5.0
package: xpkg.upbound.io/crossplane-contrib/provider-gitlab:v0.7.0
---
apiVersion: external-secrets.io/v1beta1

crossplane/values.yaml Normal file
View File

@@ -0,0 +1,186 @@
# helm-docs renders these comments into markdown. Use markdown formatting where
# appropriate.
#
# -- The number of Crossplane pod `replicas` to deploy.
replicas: 1
# -- The deployment strategy for the Crossplane and RBAC Manager pods.
deploymentStrategy: RollingUpdate
image:
# -- Repository for the Crossplane pod image.
repository: xpkg.upbound.io/crossplane/crossplane
# -- The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`.
tag: ""
# -- The image pull policy used for Crossplane and RBAC Manager pods.
pullPolicy: IfNotPresent
# -- Add `nodeSelectors` to the Crossplane pod deployment.
nodeSelector: {}
# -- Add `tolerations` to the Crossplane pod deployment.
tolerations: []
# -- Add `affinities` to the Crossplane pod deployment.
affinity: {}
# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`.
hostNetwork: false
# -- Specify the `dnsPolicy` to be used by the Crossplane pod.
dnsPolicy: ""
# -- Add custom `labels` to the Crossplane pod deployment.
customLabels: {}
# -- Add custom `annotations` to the Crossplane pod deployment.
customAnnotations: {}
serviceAccount:
# -- Add custom `annotations` to the Crossplane ServiceAccount.
customAnnotations: {}
# -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the Crossplane pod.
leaderElection: true
# -- Add custom arguments to the Crossplane pod.
args: []
provider:
# -- A list of Provider packages to install.
packages: []
configuration:
# -- A list of Configuration packages to install.
packages: []
function:
# -- A list of Function packages to install
packages: []
# -- The imagePullSecret names to add to the Crossplane ServiceAccount.
imagePullSecrets: []
registryCaBundleConfig:
# -- The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates.
name: ""
# -- The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates.
key: ""
service:
# -- Configure annotations on the service object. Only enabled when webhooks.enabled = true
customAnnotations: {}
webhooks:
# -- Enable webhooks for Crossplane and installed Provider packages.
enabled: true
rbacManager:
# -- Deploy the RBAC Manager pod and its required roles.
deploy: true
# -- Don't install aggregated Crossplane ClusterRoles.
skipAggregatedClusterRoles: false
# -- The number of RBAC Manager pod `replicas` to deploy.
replicas: 1
# -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the RBAC Manager pod.
leaderElection: true
# -- Add custom arguments to the RBAC Manager pod.
args: []
# -- Add `nodeSelectors` to the RBAC Manager pod deployment.
nodeSelector: {}
# -- Add `tolerations` to the RBAC Manager pod deployment.
tolerations: []
# -- Add `affinities` to the RBAC Manager pod deployment.
affinity: {}
# -- The PriorityClass name to apply to the Crossplane and RBAC Manager pods.
priorityClassName: ""
resourcesCrossplane:
limits:
# -- CPU resource limits for the Crossplane pod.
cpu: 500m
# -- Memory resource limits for the Crossplane pod.
memory: 1024Mi
requests:
# -- CPU resource requests for the Crossplane pod.
cpu: 100m
# -- Memory resource requests for the Crossplane pod.
memory: 256Mi
securityContextCrossplane:
# -- The user ID used by the Crossplane pod.
runAsUser: 65532
# -- The group ID used by the Crossplane pod.
runAsGroup: 65532
# -- Enable `allowPrivilegeEscalation` for the Crossplane pod.
allowPrivilegeEscalation: false
# -- Set the Crossplane pod root file system as read-only.
readOnlyRootFilesystem: true
packageCache:
# -- Set to `Memory` to hold the package cache in a RAM backed file system. Useful for Crossplane development.
medium: ""
# -- The size limit for the package cache. If medium is `Memory` the `sizeLimit` can't exceed Node memory.
sizeLimit: 20Mi
# -- The name of a PersistentVolumeClaim to use as the package cache. Disables the default package cache `emptyDir` Volume.
pvc: ""
# -- The name of a ConfigMap to use as the package cache. Disables the default package cache `emptyDir` Volume.
configMap: ""
resourcesRBACManager:
limits:
# -- CPU resource limits for the RBAC Manager pod.
cpu: 100m
# -- Memory resource limits for the RBAC Manager pod.
memory: 512Mi
requests:
# -- CPU resource requests for the RBAC Manager pod.
cpu: 100m
# -- Memory resource requests for the RBAC Manager pod.
memory: 256Mi
securityContextRBACManager:
# -- The user ID used by the RBAC Manager pod.
runAsUser: 65532
# -- The group ID used by the RBAC Manager pod.
runAsGroup: 65532
# -- Enable `allowPrivilegeEscalation` for the RBAC Manager pod.
allowPrivilegeEscalation: false
# -- Set the RBAC Manager pod root file system as read-only.
readOnlyRootFilesystem: true
metrics:
# -- Enable Prometheus path, port and scrape annotations and expose port 8080 for both the Crossplane and RBAC Manager pods.
enabled: false
# -- Add custom environmental variables to the Crossplane pod deployment.
# Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`.
extraEnvVarsCrossplane: {}
# -- Add custom environmental variables to the RBAC Manager pod deployment.
# Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`.
extraEnvVarsRBACManager: {}
# -- Add a custom `securityContext` to the Crossplane pod.
podSecurityContextCrossplane: {}
# -- Add a custom `securityContext` to the RBAC Manager pod.
podSecurityContextRBACManager: {}
# -- Add custom `volumes` to the Crossplane pod.
extraVolumesCrossplane: {}
# -- Add custom `volumeMounts` to the Crossplane pod.
extraVolumeMountsCrossplane: {}
# -- To add arbitrary Kubernetes Objects during a Helm Install
extraObjects: []
# - apiVersion: pkg.crossplane.io/v1alpha1
# kind: ControllerConfig
# metadata:
# name: aws-config
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789101:role/example
# helm.sh/hook: post-install
# spec:
# podSecurityContext:
# fsGroup: 2000

View File

@@ -1,95 +0,0 @@
stages:
- plan
- apply
- destroy
variables:
WORKDIR: $CI_PROJECT_DIR/dmz/terraform
GITLAB_TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/dmz
image:
name: registry.durp.info/opentofu/opentofu:latest
entrypoint: [""]
.tf-init:
before_script:
- cd $WORKDIR
- tofu init
-reconfigure
-backend-config="address=${GITLAB_TF_ADDRESS}"
-backend-config="lock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="unlock_address=${GITLAB_TF_ADDRESS}/lock"
-backend-config="username=gitlab-ci-token"
-backend-config="password=${CI_JOB_TOKEN}"
-backend-config="lock_method=POST"
-backend-config="unlock_method=DELETE"
-backend-config="retry_wait_min=5"
format:
stage: .pre
allow_failure: false
script:
- cd $WORKDIR
- tofu fmt -diff -check -write=false
rules:
- changes:
- "dmz/terraform/*.tf"
validate:
stage: .pre
allow_failure: false
extends: .tf-init
script:
- tofu validate
rules:
- changes:
- "dmz/terraform/*.tf"
plan-dmz-infrastructure:
stage: plan
variables:
PLAN: plan.tfplan
JSON_PLAN_FILE: tfplan.json
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- apk add --update curl jq
- alias convert_report="jq -r '([.resource_changes[].change.actions?]|flatten)|{\"create\":(map(select(.==\"create\"))|length),\"update\":(map(select(.==\"update\"))|length),\"delete\":(map(select(.==\"delete\"))|length)}'"
- tofu plan -out=$PLAN $ARGUMENTS
- tofu show --json $PLAN | jq -r '([.resource_changes[].change.actions?]|flatten)|{"create":(map(select(.=="create"))|length),"update":(map(select(.=="update"))|length),"delete":(map(select(.=="delete"))|length)}' > $JSON_PLAN_FILE
artifacts:
reports:
terraform: $WORKDIR/$JSON_PLAN_FILE
needs: ["validate","format"]
rules:
- changes:
- "dmz/terraform/*.tf"
apply-dmz-infrastructure:
stage: apply
variables:
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- tofu apply -auto-approve $ARGUMENTS
rules:
- changes:
- "dmz/terraform/*.tf"
when: manual
needs: ["plan-dmz-infrastructure"]
destroy-dmz-infrastructure:
stage: destroy
variables:
ENVIRONMENT_NAME: dmz
allow_failure: false
extends: .tf-init
script:
- tofu destroy -auto-approve $ARGUMENTS
rules:
- changes:
- "dmz/terraform/*.tf"
when: manual
needs: ["plan-dmz-infrastructure"]

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: authentik
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: authentik-remote-cluster
repository: https://charts.goauthentik.io
version: 2.0.0

View File

@@ -1,30 +0,0 @@
authentik-remote-cluster:
# -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible
nameOverride: ""
# -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible
fullnameOverride: ""
# -- Override the Kubernetes version, which is used to evaluate certain manifests
kubeVersionOverride: ""
## Globally shared configuration for authentik components.
global:
# -- Provide a name in place of `authentik`
nameOverride: ""
# -- String to fully override `"authentik.fullname"`
fullnameOverride: ""
# -- A custom namespace to override the default namespace for the deployed resources.
namespaceOverride: ""
# -- Common labels for all resources.
additionalLabels: {}
# app: authentik
# -- Annotations to apply to all resources
annotations: {}
serviceAccountSecret:
# -- Create a secret with the service account credentials
enabled: true
clusterRole:
# -- Create a clusterrole in addition to a namespaced role.
enabled: true

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: cert-manager
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.16.3

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: issuer
secrets:
- name: issuer-token-lmzpj
---
apiVersion: v1
kind: Secret
metadata:
name: issuer-token-lmzpj
annotations:
kubernetes.io/service-account.name: issuer
type: kubernetes.io/service-account-token

File diff suppressed because one or more lines are too long

View File

@@ -1,22 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: cloudflare-api-token-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: cloudflare-api-token-secret
data:
- secretKey: cloudflare-api-token-secret
remoteRef:
key: kv/cert-manager
property: cloudflare-api-token-secret
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,26 +0,0 @@
cert-manager:
crds:
enabled: true
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-controller
pullPolicy: Always
replicaCount: 3
extraArgs:
- --dns01-recursive-nameservers=1.1.1.1:53,1.0.0.1:53
- --dns01-recursive-nameservers-only
podDnsPolicy: None
podDnsConfig:
nameservers:
- "1.1.1.1"
- "1.0.0.1"
webhook:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-webhook
pullPolicy: Always
cainjector:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-cainjector
pullPolicy: Always

View File

@@ -1,30 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: external-dns-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: external-dns
data:
- secretKey: cloudflare_api_email
remoteRef:
key: kv/cloudflare
property: cloudflare_api_email
- secretKey: cloudflare_api_key
remoteRef:
key: kv/cloudflare
property: cloudflare_api_key
- secretKey: cloudflare_api_token
remoteRef:
key: kv/cloudflare
property: cloudflare_api_token
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,18 +0,0 @@
external-dns:
global:
imageRegistry: "registry.durp.info"
image:
pullPolicy: Always
txtPrefix: "dmz-"
sources:
- service
provider: cloudflare
cloudflare:
secretName: "external-dns"
proxied: false
policy: sync

View File

@@ -1,11 +0,0 @@
apiVersion: v2
name: external-secrets
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.13.0

View File

@@ -1,81 +0,0 @@
apiVersion: v1
data:
vault.pem: |
-----BEGIN CERTIFICATE-----
MIIEszCCA5ugAwIBAgIUZEzzxqEuYiKHkL1df+Cb22NRRJMwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzIyMzQ0MloXDTM1MDEy
MTExMTU1NVowIDEeMBwGA1UEAxMVdmF1bHQuaW5mcmEuZHVycC5pbmZvMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkZM0ue4bMcmmATs+kGYSpR2hLUzq
scGIwCtqmaKCMbd1xhmgjnIR3zvSRptLR2GVGvc1ti6qby0jXYvcqbxkHvay00zW
2zYN+M2m4lXpuWzg1t6NEoO6XGAsGj2v0vcVktPPU9uj0rGUVGWWfsvjoXqQFg5I
jdxsxK9SvMvw2XtE3FgKxpzCyw94InIHlcPwFTO+3ZdKStZlMbUDIkmszLBrWFcr
XOsPDfLxqMy0Ck//LKIt8djh3254FHB1GG5+kI+JSW1o+tUcL2NymvIINwm/2acS
1uTm+j9W7iEXav0pJNmm+/dzSskc3Y0ftM0h2HCXgitBIaEZnUVneNHOLwIDAQAB
o4IB7zCCAeswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFCaQ2q7j7LyBGETEZ5qaJAdlISKCMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2
dfBMWVYeWrQ2MIH0BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6
Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFo
dHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3Nw
MDEGCCsGAQUFBzAChiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtp
L2NhMDsGCCsGAQUFBzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVy
cC5pbmZvL3YxL3BraS9jYTAgBgNVHREEGTAXghV2YXVsdC5pbmZyYS5kdXJwLmlu
Zm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAx
L3YxL3BraS9jcmwwNqA0oDKGMGh0dHBzOi8vcm9vdC12YXVsdC5pbnRlcm5hbC5k
dXJwLmluZm8vdjEvcGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAuJ+lplY/+A5L
5LzkljbKDTy3U6PLv1LtxqVCOFGiJXBnXMjtVW07bBEUadzFRNW8GHQ3w5QzOG6k
/vE/TrrJho7l05J/uc+BUrPSNjefLmQV6hn4jrP86PR0vzRfbSqKKBIID9M7+zi6
GFvHlVkSHsQyMQp7JOoax9KVzW2Y+OIgw7Lgw2tP122WCt2SIF0QenoZHsoW0guj
tzTJRmJDjn6XeJ7L3FPkf37H6ub0Jg3zBGr6eorEFfYZNN5CXezjqMFBpRdq4UIo
1M3A7o3uyZFcFsp/vGDcMBkwaCsBV9idu/HwkvGaTUNI285ilBORPD0bMZnACq/9
+Q/cdsO5lg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEmzCCA4OgAwIBAgIUQwCAs82sgSuiaVbjANHScO2DSfAwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMjEyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAn9fjGRqqFsqguz56X6cXZwEMtD9wElwSFCb4Fc8YTzlH
4fV13QwXKESLE/Q+7bw4y4FJQ8BiGNbxxbQOOgWhfGGlQyFa1lfhJtYLfqRN5C2/
S7nr0YxDB9duc4OAExVL6Pr4/Koc+vDZY03l7RzwnF2AOM9DjFTASw01TphCQjRk
U+upiN2TUhUPejV/gMR+zXM6pn98UBKG1dNubS0HzAMwAEXAPm141NDyWUCPT9+3
6P03Ka8mUTx3X49OCtvJEGEQbtlnTFQaOSkP1yLW+XRMHw3sQaV2PWXu5fInbEpZ
+SuzmgLOXtmQNmHLav9q1qeTVkpBGPWvfh2Vh1JJhQIDAQABo4IB4zCCAd8wDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJaP17f1Zw0V
55Ks9Uf0USVWl0BPMB8GA1UdIwQYMBaAFO1jCyGkpFO+QiR2dfBMWVYeWrQ2MIH0
BggrBgEFBQcBAQSB5zCB5DAzBggrBgEFBQcwAYYnaHR0cHM6Ly8xOTIuMTY4LjIw
LjI1Mzo4MjAxL3YxL3BraS9vY3NwMD0GCCsGAQUFBzABhjFodHRwczovL3Jvb3Qt
dmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3BraS9vY3NwMDEGCCsGAQUFBzAC
hiVodHRwczovLzE5Mi4xNjguMjAuMjUzOjgyMDEvdjEvcGtpL2NhMDsGCCsGAQUF
BzAChi9odHRwczovL3Jvb3QtdmF1bHQuaW50ZXJuYWwuZHVycC5pbmZvL3YxL3Br
aS9jYTAUBgNVHREEDTALgglkdXJwLmluZm8wbwYDVR0fBGgwZjAsoCqgKIYmaHR0
cHM6Ly8xOTIuMTY4LjIwLjI1Mzo4MjAxL3YxL3BraS9jcmwwNqA0oDKGMGh0dHBz
Oi8vcm9vdC12YXVsdC5pbnRlcm5hbC5kdXJwLmluZm8vdjEvcGtpL2NybDANBgkq
hkiG9w0BAQsFAAOCAQEAiqAZ4zNIEkCWcvpDRq0VyJuk59sVtJr5X4FscHQ179nE
QbbvMe+EBDFS6XQml1Elj8jiPa/D5O9Oc6Iisnm5+weZKwApz/lQ+XVkWLCoEplB
ZZ9fcWVCbMLt0xlt8qn5z/mYKfbCT7ZCqDO+prQZt+ADJcQbiknfroAAqEbNKxwN
Y9uUyOWNF3SxJEch4w2dtX+IEVmxeZnhMy8OuP0SQKl8aW40ugiG0ZD5yTBBfOD9
zsrGSU/iSatn0b7bevBhaL96hz1/rNR1cL+4/albX2hrr8Rv3/SB2DLtNQlQW0ls
AfhXAqP5zL+Ytgf1Of/pVdgnhxrYUY7RKCSGY5Hagw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDLzCCAhegAwIBAgIUNHdvOzam2HPVdwXpMHUy4wl8ZRYwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAxMJZHVycC5pbmZvMB4XDTI1MDEyMzExMTUyNVoXDTM1MDEy
MTExMTU1NVowFDESMBAGA1UEAxMJZHVycC5pbmZvMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA8XDTVEtRI3+k4yuvqVqfIiLRQJcXbmhfVtAeYk+5j9Ox
p1w9YHdnPLqLFrD1PzadjqYeAp/fwlEFfs6lqwoTS8S9vhaFqcgB57nVMb77dTBb
/08XHXOU6FPRjdFKm5QMpS7tn1XacPMy/o0bKqRREQeiuFDGVRyuF5PUgvWc1dvJ
l27JvvgYktgjfpNS4DlCxg4lGXT5abvaKf2hnr65egaIo/yRWN9wnvAzRiY7oci7
GA1oKz87Yc1tfL2gcynrwccOOCF/eUKesJR1I6GXNkN/a1fcr+Ld9Z9NhHBtO+vE
N8DsZY+kG7DE3M4BCCTFUzllcYHjaW4HaF9vZW+PYwIDAQABo3kwdzAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7WMLIaSkU75CJHZ1
8ExZVh5atDYwHwYDVR0jBBgwFoAU7WMLIaSkU75CJHZ18ExZVh5atDYwFAYDVR0R
BA0wC4IJZHVycC5pbmZvMA0GCSqGSIb3DQEBCwUAA4IBAQAS/qUI/1Yv07xUTK5k
r93kC7GSPpmpkXIsfjChAl93sebN143fu70NUP74jjCc0Wkb8hRofGg10E+/24r1
AI0KsLhzKzfIASxUVQAn8RTptLruaaPLboSA4MUZ8IB5y8Vy8E3/KtD0gD80j64Y
rm9XGHA0HTJHbPUTb/Rux2g0E7WtiyWSWH8mqzbegU8IrkM3eVT4+ylBE7YkfWDD
dw44sB71tfmDKpzWg6XQ6YMh0YfnyG1fYCj9LhuecNY9Uuo6cjDaAvkzMewWwqDx
Q2Ekas98Di6itCP8vET+gBDjeCc+XR6Hx6vzWmxlZhwDuxEKL1a2/DabUxJyMNzv
55Fn
-----END CERTIFICATE-----
kind: ConfigMap
metadata:
name: ca-pemstore

View File

@@ -1,94 +0,0 @@
external-secrets:
replicaCount: 3
revisionHistoryLimit: 1
leaderElect: true
installCRDs: true
crds:
createClusterExternalSecret: true
createClusterSecretStore: true
createClusterGenerator: true
createPushSecret: true
conversion:
enabled: false
image:
repository: registry.durp.info/external-secrets/external-secrets
pullPolicy: Always
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
webhook:
create: false
failurePolicy: Ignore
log:
level: debug
image:
repository: registry.durp.info/external-secrets/external-secrets
pullPolicy: Always
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
certController:
create: false
revisionHistoryLimit: 1
log:
level: debug
image:
repository: registry.durp.info/external-secrets/external-secrets
pullPolicy: Always
tag: ""
resources:
requests:
memory: 32Mi
cpu: 10m
limits:
memory: 32Mi
cpu: 10m
extraVolumes:
- name: ca-pemstore
configMap:
name: ca-pemstore
extraVolumeMounts:
- name: ca-pemstore
mountPath: /etc/ssl/certs/vault.pem
subPath: vault.pem
readOnly: true

View File

@@ -1,15 +0,0 @@
apiVersion: v2
name: gitlab-runner
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: gitlab-runner
repository: https://charts.gitlab.io/
version: 0.69.0
- name: gitlab-runner
repository: https://charts.gitlab.io/
version: 0.69.0
alias: personal

View File

@@ -1,44 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitlab-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: gitlab-secret
data:
- secretKey: runner-registration-token
remoteRef:
key: kv/gitlab/runner
property: runner-registration-token
- secretKey: runner-token
remoteRef:
key: kv/gitlab/runner
property: runner-token
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitlab-secret-personal
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: gitlab-secret
data:
- secretKey: runner-token
remoteRef:
key: kv/gitlab/runner
property: runner-token-personal

View File

@@ -1,143 +0,0 @@
gitlab-runner:
image:
registry: registry.durp.info
image: gitlab-org/gitlab-runner
imagePullPolicy: Always
gitlabUrl: https://gitlab.com/
unregisterRunner: false
terminationGracePeriodSeconds: 3600
concurrent: 10
checkInterval: 30
rbac:
create: true
rules: []
clusterWideAccess: false
podSecurityPolicy:
enabled: false
resourceNames:
- gitlab-runner
metrics:
enabled: true
serviceMonitor:
enabled: true
service:
enabled: true
annotations: {}
runners:
config: |
[[runners]]
[runners.kubernetes]
namespace = "{{.Release.Namespace}}"
image = "ubuntu:22.04"
privileged = true
executor: kubernetes
name: "k3s"
runUntagged: true
privileged: true
secret: gitlab-secret
#builds:
#cpuLimit: 200m
#cpuLimitOverwriteMaxAllowed: 400m
#memoryLimit: 256Mi
#memoryLimitOverwriteMaxAllowed: 512Mi
#cpuRequests: 100m
#cpuRequestsOverwriteMaxAllowed: 200m
#memoryRequests: 128Mi
#memoryRequestsOverwriteMaxAllowed: 256Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
runAsNonRoot: true
privileged: false
capabilities:
drop: ["ALL"]
podSecurityContext:
runAsUser: 100
fsGroup: 65533
resources:
limits:
memory: 2Gi
requests:
memory: 128Mi
cpu: 500m
personal:
image:
registry: registry.durp.info
image: gitlab-org/gitlab-runner
imagePullPolicy: Always
gitlabUrl: https://gitlab.com/
unregisterRunner: false
terminationGracePeriodSeconds: 3600
concurrent: 10
checkInterval: 30
rbac:
create: true
rules: []
clusterWideAccess: false
podSecurityPolicy:
enabled: false
resourceNames:
- gitlab-runner
metrics:
enabled: true
serviceMonitor:
enabled: true
service:
enabled: true
annotations: {}
runners:
config: |
[[runners]]
[runners.kubernetes]
namespace = "{{.Release.Namespace}}"
image = "ubuntu:22.04"
privileged = true
executor: kubernetes
name: "k3s"
runUntagged: true
privileged: true
secret: gitlab-secret-personal
#builds:
#cpuLimit: 200m
#cpuLimitOverwriteMaxAllowed: 400m
#memoryLimit: 256Mi
#memoryLimitOverwriteMaxAllowed: 512Mi
#cpuRequests: 100m
#cpuRequestsOverwriteMaxAllowed: 200m
#memoryRequests: 128Mi
#memoryRequestsOverwriteMaxAllowed: 256Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
runAsNonRoot: true
privileged: false
capabilities:
drop: ["ALL"]
podSecurityContext:
runAsUser: 100
fsGroup: 65533
resources:
limits:
memory: 2Gi
requests:
memory: 128Mi
cpu: 500m

View File

@@ -1,7 +0,0 @@
apiVersion: v2
name: internalproxy
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "0.1.0"

View File

@@ -1,42 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: master-cluster
port: 443
tls:
secretName: bitwarden-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: bitwarden-tls
spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: bitwarden-tls
commonName: "bitwarden.durp.info"
dnsNames:
- "bitwarden.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: bitwarden-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: bitwarden.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,64 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: duplicati
spec:
ports:
- name: app
port: 8200
protocol: TCP
targetPort: 8200
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: duplicati
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 8200
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: duplicati-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`duplicati.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: duplicati
port: 8200
tls:
secretName: duplicati-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: duplicati-tls
spec:
secretName: duplicati-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "duplicati.internal.durp.info"
dnsNames:
- "duplicati.internal.durp.info"

View File

@@ -1,45 +0,0 @@
apiVersion: v1
kind: Endpoints
metadata:
name: master-cluster
subsets:
- addresses:
- ip: 192.168.20.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: master-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
name: infra-cluster
subsets:
- addresses:
- ip: 192.168.12.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: infra-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443

View File

@@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: gitea
spec:
ports:
- name: app
port: 3000
protocol: TCP
targetPort: 3000
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: gitea
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 3000
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: gitea-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`gitea.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: gitea
port: 3000
scheme: http
tls:
secretName: gitea-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: gitea-tls
spec:
secretName: gitea-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "gitea.durp.info"
dnsNames:
- "gitea.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: gitea-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: gitea.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kasm
spec:
ports:
- name: app
port: 443
protocol: TCP
targetPort: 443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: kasm
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 443
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: kasm-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`kasm.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: kasm
port: 443
scheme: https
tls:
secretName: kasm-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kasm-tls
spec:
secretName: kasm-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "kasm.durp.info"
dnsNames:
- "kasm.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: kasm-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: kasm.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,45 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: kuma-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`kuma.durp.info`) && PathPrefix(`/`)
kind: Rule
middlewares:
- name: authentik-proxy-provider
namespace: traefik
services:
- name: master-cluster
port: 443
tls:
secretName: kuma-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kuma-tls
spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: kuma-tls
commonName: "kuma.durp.info"
dnsNames:
- "kuma.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: kuma-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: kuma.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,71 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nexus
spec:
ports:
- name: app
port: 8081
protocol: TCP
targetPort: 8081
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: nexus
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 8081
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nexus-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`nexus.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: nexus
port: 8081
tls:
secretName: nexus-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: nexus-tls
spec:
secretName: nexus-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "nexus.durp.info"
dnsNames:
- "nexus.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: nexus-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: nexus.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,102 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: ollama-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: ollama-secret
data:
- secretKey: users
remoteRef:
key: kv/ollama
property: users
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: ollama-basic-auth
spec:
basicAuth:
headerField: x-api-key
secret: ollama-secret
---
apiVersion: v1
kind: Service
metadata:
name: ollama
spec:
ports:
- name: app
port: 11435
protocol: TCP
targetPort: 11435
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ollama
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 11435
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ollama-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`ollama.durp.info`) && PathPrefix(`/`)
middlewares:
- name: ollama-basic-auth
kind: Rule
services:
- name: ollama
port: 11435
tls:
secretName: ollama-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ollama-tls
spec:
secretName: ollama-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "ollama.durp.info"
dnsNames:
- "ollama.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: ollama-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: ollama.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,42 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: open-webui-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`open-webui.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: master-cluster
port: 443
tls:
secretName: open-webui-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: open-webui-tls
spec:
secretName: open-webui-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "open-webui.durp.info"
dnsNames:
- "open-webui.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: open-webui-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: open-webui.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: plex
spec:
ports:
- name: app
port: 32400
protocol: TCP
targetPort: 32400
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: plex
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 32400
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: plex-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`plex.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: plex
port: 32400
scheme: https
tls:
secretName: plex-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: plex-tls
spec:
secretName: plex-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "plex.durp.info"
dnsNames:
- "plex.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: plex-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: plex.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,62 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: portainer
spec:
ports:
- name: app
port: 9443
protocol: TCP
targetPort: 9443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: portainer
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 9443
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: portainer-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`portainer.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: portainer
port: 9443
tls:
secretName: portainer-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: portainer-tls
spec:
secretName: portainer-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "portainer.internal.durp.info"
dnsNames:
- "portainer.internal.durp.info"

View File

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: redlib
spec:
ports:
- name: app
port: 8082
protocol: TCP
targetPort: 8082
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: redlib
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 8082
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: redlib-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`redlib.durp.info`) && PathPrefix(`/`)
middlewares:
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: redlib
port: 8082
tls:
secretName: redlib-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: redlib-tls
spec:
secretName: redlib-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "redlib.durp.info"
dnsNames:
- "redlib.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: redlib-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: redlib.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,71 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: registry
spec:
ports:
- name: app
port: 5000
protocol: TCP
targetPort: 5000
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: registry
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 5000
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: registry-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`registry.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: registry
port: 5000
tls:
secretName: registry-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: registry-tls
spec:
secretName: registry-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "registry.durp.info"
dnsNames:
- "registry.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: registry-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: registry.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: root-vault
spec:
ports:
- name: app
port: 8201
protocol: TCP
targetPort: 8201
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: root-vault
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 8201
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: root-vault-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`root-vault.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: root-vault
port: 8201
scheme: https
tls:
secretName: root-vault-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: root-vault-tls
spec:
secretName: root-vault-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "root-vault.internal.durp.info"
dnsNames:
- "root-vault.internal.durp.info"

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,76 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: smokeping
spec:
ports:
- name: app
port: 81
protocol: TCP
targetPort: 81
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: smokeping
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 81
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: smokeping-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`smokeping.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: smokeping
port: 81
tls:
secretName: smokeping-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: smokeping-tls
spec:
secretName: smokeping-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "smokeping.durp.info"
dnsNames:
- "smokeping.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: smokeping-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: smokeping.durp.info
spec:
type: ExternalName
externalName: durp.info

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: speedtest
spec:
ports:
- name: app
port: 6580
protocol: TCP
targetPort: 6580
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: speedtest
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 6580
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: speedtest-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`speedtest.durp.info`) && PathPrefix(`/`)
kind: Rule
middlewares:
- name: authentik-proxy-provider
namespace: traefik
services:
- name: speedtest
port: 6580
tls:
secretName: speedtest-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: speedtest-tls
spec:
secretName: speedtest-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "speedtest.durp.info"
dnsNames:
- "speedtest.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: speedtest-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: speedtest.durp.info
spec:
type: ExternalName
externalName: durp.info

@@ -1,67 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: tdarr
spec:
ports:
- name: app
port: 8267
protocol: TCP
targetPort: 8267
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: tdarr
subsets:
- addresses:
- ip: 192.168.21.200
ports:
- name: app
port: 8267
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: tdarr-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`tdarr.internal.durp.info`)
middlewares:
- name: whitelist
namespace: traefik
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: tdarr
port: 8267
scheme: http
tls:
secretName: tdarr-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: tdarr-tls
spec:
secretName: tdarr-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "tdarr.internal.durp.info"
dnsNames:
- "tdarr.internal.durp.info"

@@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: unifi
spec:
ports:
- name: app
port: 443
protocol: TCP
targetPort: 443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: unifi
subsets:
- addresses:
- ip: 192.168.98.1
ports:
- name: app
port: 443
protocol: TCP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: unifi-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`unifi.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: unifi
port: 443
scheme: https
tls:
secretName: unifi-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: unifi-tls
spec:
secretName: unifi-tls
issuerRef:
name: vault-issuer
kind: ClusterIssuer
commonName: "unifi.internal.durp.info"
dnsNames:
- "unifi.internal.durp.info"

@@ -1,17 +0,0 @@
apiVersion: v2
name: istio-system
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: base
repository: https://istio-release.storage.googleapis.com/charts
version: 1.25.0
- name: istiod
repository: https://istio-release.storage.googleapis.com/charts
version: 1.25.0
- name: gateway
repository: https://istio-release.storage.googleapis.com/charts
version: 1.25.0
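
This umbrella chart only declares the three upstream Istio charts as dependencies; all of the configuration lives in the values.yaml that follows, where Helm passes each top-level key to the dependency of the same name. Illustration of that scoping only (the real values are in the next file):

# Everything under "istiod:" goes to the istiod subchart,
# "base:" to base, and "gateway:" to gateway.
istiod:
  profile: remote
base:
  profile: remote
gateway:
  name: istio-eastwestgateway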

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
annotations:
topology.istio.io/controlPlaneClusters: cluster1
labels:
kubernetes.io/metadata.name: istio-system
name: istio-system
spec:
finalizers:
- kubernetes
status:
phase: Active

@@ -1,725 +0,0 @@
istiod:
profile: remote
autoscaleEnabled: true
autoscaleMin: 1
autoscaleMax: 5
autoscaleBehavior: {}
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
hub: ""
tag: ""
variant: ""
# Can be a full hub/image:tag
image: pilot
traceSampling: 1.0
# Resources for a small pilot install
resources:
requests:
cpu: 500m
memory: 2048Mi
# Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
# Whether to use an existing CNI installation
cni:
enabled: false
provider: default
# Additional container arguments
extraContainerArgs: []
env: {}
# Settings related to the untaint controller
# This controller will remove `cni.istio.io/not-ready` from nodes when the istio-cni pod becomes ready
# It should be noted that cluster operator/owner is responsible for having the taint set by their infrastructure provider when new nodes are added to the cluster; the untaint controller does not taint nodes
taint:
# Controls whether or not the untaint controller is active
enabled: false
# What namespace the untaint controller should watch for istio-cni pods. This is only required when istio-cni is running in a different namespace than istiod
namespace: ""
affinity: {}
tolerations: []
cpu:
targetAverageUtilization: 80
memory: {}
# targetAverageUtilization: 80
# Additional volumeMounts to the istiod container
volumeMounts: []
# Additional volumes to the istiod pod
volumes: []
# Inject initContainers into the istiod pod
initContainers: []
nodeSelector: {}
podAnnotations: {}
serviceAnnotations: {}
serviceAccountAnnotations: {}
sidecarInjectorWebhookAnnotations: {}
topologySpreadConstraints: []
# You can use jwksResolverExtraRootCA to provide a root certificate
# in PEM format. This will then be trusted by pilot when resolving
# JWKS URIs.
jwksResolverExtraRootCA: ""
# The following is used to limit how long a sidecar can be connected
# to a pilot. It balances out load across pilot instances at the cost of
# increasing system churn.
keepaliveMaxServerConnectionAge: 30m
# Additional labels to apply to the deployment.
deploymentLabels: {}
## Mesh config settings
# Install the mesh config map, generated from values.yaml.
# If false, pilot will use default values (by default) or user-supplied values.
configMap: true
# Additional labels to apply on the pod level for monitoring and logging configuration.
podLabels: {}
# Setup how istiod Service is configured. See https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
ipFamilyPolicy: ""
ipFamilies: []
# Ambient mode only.
# Set this if you install ztunnel to a different namespace from `istiod`.
# If set, `istiod` will allow connections from trusted node proxy ztunnels
# in the provided namespace.
# If unset, `istiod` will assume the trusted node proxy ztunnel resides
# in the same namespace as itself.
trustedZtunnelNamespace: ""
sidecarInjectorWebhook:
# You can use the fields alwaysInjectSelector and neverInjectSelector, which will always inject the sidecar or
# always skip the injection on pods that match that label selector, regardless of the global policy.
# See https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#more-control-adding-exceptions
neverInjectSelector: []
alwaysInjectSelector: []
# injectedAnnotations are additional annotations that will be added to the pod spec after injection
# This is primarily to support PSP annotations. For example, if you defined a PSP with the annotations:
#
# annotations:
# apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
# apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
#
# The PSP controller would add corresponding annotations to the pod spec for each container. However, this happens before
# the injector adds additional containers, so we must specify them explicitly here. With the above example, we could specify:
# injectedAnnotations:
# container.apparmor.security.beta.kubernetes.io/istio-init: runtime/default
# container.apparmor.security.beta.kubernetes.io/istio-proxy: runtime/default
injectedAnnotations: {}
# This enables injection of sidecar in all namespaces,
# with the exception of namespaces with "istio-injection:disabled" annotation
# Only one environment should have this enabled.
enableNamespacesByDefault: false
# Mutations that occur after the sidecar injector are not handled by default, as the Istio sidecar injector is only run
# once. For example, an OPA sidecar injected after the Istio sidecar will not have its liveness/readiness probes rewritten.
# Setting this to `IfNeeded` will result in the sidecar injector being run again if additional mutations occur.
reinvocationPolicy: Never
rewriteAppHTTPProbe: true
# Templates defines a set of custom injection templates that can be used. For example, defining:
#
# templates:
# hello: |
# metadata:
# labels:
# hello: world
#
# Then starting a pod with the `inject.istio.io/templates: hello` annotation, will result in the pod
# being injected with the hello=world labels.
# This is intended for advanced configuration only; most users should use the built in template
templates: {}
# Default templates specifies a set of default templates that are used in sidecar injection.
# By default, a template `sidecar` is always provided, which contains the template of default sidecar.
# To inject other additional templates, define it using the `templates` option, and add it to
# the default templates list.
# For example:
#
# templates:
# hello: |
# metadata:
# labels:
# hello: world
#
# defaultTemplates: ["sidecar", "hello"]
defaultTemplates: []
istiodRemote:
# If `true`, indicates that this cluster/install should consume a "remote istiod" installation,
# and istiod itself will NOT be installed in this cluster - only the support resources necessary
# to utilize a remote instance.
enabled: false
# Sidecar injector mutating webhook configuration clientConfig.url value.
# For example: https://$remotePilotAddress:15017/inject
# The host should not refer to a service running in the cluster; use a service reference by specifying
# the clientConfig.service field instead.
injectionURL: ""
# Sidecar injector mutating webhook configuration path value for the clientConfig.service field.
# Override to pass env variables, for example: /inject/cluster/remote/net/network2
injectionPath: "/inject/cluster/cluster2/net/network1"
injectionCABundle: ""
telemetry:
enabled: true
v2:
# For Null VM case now.
# This also enables metadata exchange.
enabled: true
# Indicate if prometheus stats filter is enabled or not
prometheus:
enabled: true
# stackdriver filter settings.
stackdriver:
enabled: false
# Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
# Revision tags are aliases to Istio control plane revisions
revisionTags: []
# For Helm compatibility.
ownerName: ""
# meshConfig defines runtime configuration of components, including Istiod and istio-agent behavior
# See https://istio.io/docs/reference/config/istio.mesh.v1alpha1/ for all available options
meshConfig:
enablePrometheusMerge: true
experimental:
stableValidationPolicy: false
global:
# Used to locate istiod.
istioNamespace: istio-system
# List of cert-signers to allow "approve" action in the istio cluster role
#
# certSigners:
# - clusterissuers.cert-manager.io/istio-ca
certSigners: []
# enable pod disruption budget for the control plane, which is used to
# ensure Istio control plane components are gradually upgraded or recovered.
defaultPodDisruptionBudget:
enabled: true
# The values aren't mutable due to a current PodDisruptionBudget limitation
# minAvailable: 1
# A minimal set of requested resources to be applied to all deployments so that
# Horizontal Pod Autoscaler will be able to function (if set).
# Each component can overwrite these default values by adding its own resources
# block in the relevant section below and setting the desired resources values.
defaultResources:
requests:
cpu: 10m
# memory: 128Mi
# limits:
# cpu: 100m
# memory: 128Mi
# Default hub for Istio images.
# Releases are published to docker hub under 'istio' project.
# Dev builds from prow are on gcr.io
hub: docker.io/istio
# Default tag for Istio images.
tag: 1.25.0
# Variant of the image to use.
# Currently supported are: [debug, distroless]
variant: ""
# Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
# ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace
# to use for pulling any images in pods that reference this ServiceAccount.
# For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)
# ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.
# Must be set for any cluster configured with private docker registry.
imagePullSecrets: []
# - private-registry-key
# Enabled by default in master for maximising testing.
istiod:
enableAnalysis: false
# To output all istio components logs in json format by adding --log_as_json argument to each container argument
logAsJson: false
# Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>
# The control plane has different scopes depending on component, but can configure default log level across all components
# If empty, default scope and level will be used as configured in code
logging:
level: "default:info"
omitSidecarInjectorConfigMap: false
# Configure whether Operator manages webhook configurations. The current behavior
# of Istiod is to manage its own webhook configurations.
# When this option is set as true, Istio Operator, instead of webhooks, manages the
# webhook configurations. When this option is set as false, webhooks manage their
# own webhook configurations.
operatorManageWebhooks: false
# Custom DNS config for the pod to resolve names of services in other
# clusters. Use this to add additional search domains, and other settings.
# see
# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config
# This does not apply to gateway pods as they typically need a different
# set of DNS settings than the normal application pods (e.g., in
# multicluster scenarios).
# NOTE: If using templates, follow the pattern in the commented example below.
#podDNSSearchNamespaces:
#- global
#- "{{ valueOrDefault .DeploymentMeta.Namespace \"default\" }}.global"
# Kubernetes >=v1.11.0 will create two PriorityClass, including system-cluster-critical and
# system-node-critical, it is better to configure this in order to make sure your Istio pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""
proxy:
image: proxyv2
# This controls the 'policy' in the sidecar injector.
autoInject: enabled
# CAUTION: It is important to ensure that all Istio helm charts specify the same clusterDomain value
# cluster domain. Default value is "cluster.local".
clusterDomain: "cluster.local"
# Per Component log level for proxy, applies to gateways and sidecars. If a component level is
# not set, then the global "logLevel" will be used.
componentLogLevel: "misc:error"
# istio ingress capture allowlist
# examples:
# Redirect only selected ports: --includeInboundPorts="80,8080"
excludeInboundPorts: ""
includeInboundPorts: "*"
# istio egress capture allowlist
# https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly
# example: includeIPRanges: "172.30.0.0/16,172.20.0.0/16"
# would only capture egress traffic on those two IP Ranges, all other outbound traffic would
# be allowed by the sidecar
includeIPRanges: "*"
excludeIPRanges: ""
includeOutboundPorts: ""
excludeOutboundPorts: ""
# Log level for proxy, applies to gateways and sidecars.
# Expected values are: trace|debug|info|warning|error|critical|off
logLevel: warning
# Specify the path to the outlier event log.
# Example: /dev/stdout
outlierLogPath: ""
#If set to true, istio-proxy container will have privileged securityContext
privileged: false
# The number of successive failed probes before indicating readiness failure.
readinessFailureThreshold: 4
# The initial delay for readiness probes in seconds.
readinessInitialDelaySeconds: 0
# The period between readiness probes.
readinessPeriodSeconds: 15
# Enables or disables a startup probe.
# For optimal startup times, changing this should be tied to the readiness probe values.
#
# If the probe is enabled, it is recommended to have delay=0s,period=15s,failureThreshold=4.
# This ensures the pod is marked ready immediately after the startup probe passes (which has a 1s poll interval),
# and doesn't spam the readiness endpoint too much
#
# If the probe is disabled, it is recommended to have delay=1s,period=2s,failureThreshold=30.
# This ensures the startup is reasonably fast (polling every 2s). 1s delay is used since the startup is not often ready instantly.
startupProbe:
enabled: true
failureThreshold: 600 # 10 minutes
# Resources for the sidecar.
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
# Default port for Pilot agent health checks. A value of 0 will disable health checking.
statusPort: 15020
# Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver, none.
# If using stackdriver tracer outside GCP, set env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file.
tracer: "none"
proxy_init:
# Base name for the proxy_init container, used to configure iptables.
image: proxyv2
# Bypasses iptables idempotency handling, and attempts to apply iptables rules regardless of table state, which may cause unrecoverable failures.
# Do not use unless you need to work around an issue of the idempotency handling. This flag will be removed in future releases.
forceApplyIptables: false
# configure remote pilot and istiod service and endpoint
remotePilotAddress: "192.168.12.131"
##############################################################################################
# The following values are found in other charts. To effectively modify these values, make #
# sure they are consistent across your Istio helm charts                                   #
##############################################################################################
# The customized CA address to retrieve certificates for the pods in the cluster.
# CSR clients such as the Istio Agent and ingress gateways can use this to specify the CA endpoint.
# If not set explicitly, default to the Istio discovery address.
caAddress: ""
# Enable control of remote clusters.
externalIstiod: false
# Configure a remote cluster as the config cluster for an external istiod.
configCluster: true
# configValidation enables the validation webhook for Istio configuration.
configValidation: true
# Mesh ID means Mesh Identifier. It should be unique within the scope where
# meshes will interact with each other, but it is not required to be
# globally/universally unique. For example, if any of the following are true,
# then two meshes must have different Mesh IDs:
# - Meshes will have their telemetry aggregated in one place
# - Meshes will be federated together
# - Policy will be written referencing one mesh from the other
#
# If an administrator expects that any of these conditions may become true in
# the future, they should ensure their meshes have different Mesh IDs
# assigned.
#
# Within a multicluster mesh, each cluster must be (manually or auto)
# configured to have the same Mesh ID value. If an existing cluster 'joins' a
# multicluster mesh, it will need to be migrated to the new mesh ID. Details
# of migration TBD, and it may be a disruptive operation to change the Mesh
# ID post-install.
#
# If the mesh admin does not specify a value, Istio will use the value of the
# mesh's Trust Domain. The best practice is to select a proper Trust Domain
# value.
meshID: ""
# Configure the mesh networks to be used by the Split Horizon EDS.
#
# The following example defines two networks with different endpoints association methods.
# For `network1` all endpoints that their IP belongs to the provided CIDR range will be
# mapped to network1. The gateway for this network example is specified by its public IP
# address and port.
# The second network, `network2`, in this example is defined differently with all endpoints
# retrieved through the specified Multi-Cluster registry being mapped to network2. The
# gateway is also defined differently with the name of the gateway service on the remote
# cluster. The public IP for the gateway will be determined from that remote service (only
# LoadBalancer gateway service type is currently supported, for a NodePort type gateway service,
# it still needs to be configured manually).
#
# meshNetworks:
# network1:
# endpoints:
# - fromCidr: "192.168.0.1/24"
# gateways:
# - address: 1.1.1.1
# port: 80
# network2:
# endpoints:
# - fromRegistry: reg1
# gateways:
# - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
# port: 443
#
meshNetworks: {}
# Use the user-specified, secret volume mounted key and certs for Pilot and workloads.
mountMtlsCerts: false
multiCluster:
# Set to true to connect two kubernetes clusters via their respective
# ingressgateway services when pods in each cluster cannot directly
# talk to one another. All clusters should be using Istio mTLS and must
# have a shared root CA for this model to work.
enabled: false
# Should be set to the name of the cluster this installation will run in. This is required for sidecar injection
# to properly label proxies
clusterName: "dmz"
# Network defines the network this cluster belongs to. This name
# corresponds to the networks in the map of mesh networks.
network: ""
# Configure the certificate provider for control plane communication.
# Currently, two providers are supported: "kubernetes" and "istiod".
# As some platforms may not have kubernetes signing APIs,
# Istiod is the default
pilotCertProvider: istiod
sds:
# The JWT token for SDS and the aud field of such JWT. See RFC 7519, section 4.1.3.
# When a CSR is sent from Istio Agent to the CA (e.g. Istiod), this aud is to make sure the
# JWT is intended for the CA.
token:
aud: istio-ca
sts:
# The service port used by Security Token Service (STS) server to handle token exchange requests.
# Setting this port to a non-zero value enables STS server.
servicePort: 0
# The name of the CA for workload certificates.
# For example, when caName=GkeWorkloadCertificate, GKE workload certificates
# will be used as the certificates for workloads.
# The default value is "" and when caName="", the CA will be configured by other
# mechanisms (e.g., environmental variable CA_PROVIDER).
caName: ""
waypoint:
# Resources for the waypoint proxy.
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: "2"
memory: 1Gi
# If specified, affinity defines the scheduling constraints of waypoint pods.
affinity: {}
# Topology Spread Constraints for the waypoint proxy.
topologySpreadConstraints: []
# Node labels for the waypoint proxy.
nodeSelector: {}
# Tolerations for the waypoint proxy.
tolerations: []
base:
# For istioctl usage to disable istio config crds in base
enableIstioConfigCRDs: true
# Gateway Settings
gateways:
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {}
# Set to `type: RuntimeDefault` to use the default profile for templated gateways, if your container runtime supports it
seccompProfile: {}
base:
profile: remote
global:
imagePullSecrets: []
istioNamespace: istio-system
base:
excludedCRDs: []
enableCRDTemplates: true
validationURL: ""
validationCABundle: ""
enableIstioConfigCRDs: true
defaultRevision: "default"
experimental:
stableValidationPolicy: false
gateway:
# Name allows overriding the release name. Generally this should not be set
name: "istio-eastwestgateway"
# revision declares which revision this gateway is a part of
revision: ""
# Controls the spec.replicas setting for the Gateway deployment if set.
# Otherwise defaults to Kubernetes Deployment default (1).
replicaCount:
kind: Deployment
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
enabled: true
serviceAccount:
# If set, a service account will be created. Otherwise, the default is used
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set, the release name is used
name: ""
podAnnotations:
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {}
containerSecurityContext: {}
service:
# Type of service. Set to "None" to disable the service entirely
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
## Whether to automatically allocate NodePorts (only for LoadBalancers).
# allocateLoadBalancerNodePorts: false
## Set LoadBalancer class (only for LoadBalancers).
# loadBalancerClass: ""
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: {}
autoscaleBehavior: {}
# Pod environment variables
env: {}
# Deployment Update strategy
strategy: {}
# Sets the Deployment minReadySeconds value
minReadySeconds:
# Optionally configure a custom readinessProbe. By default the control plane
# automatically injects the readinessProbe. If you wish to override that
# behavior, you may define your own readinessProbe here.
readinessProbe: {}
# Labels to apply to all resources
labels:
# By default, don't enroll gateways into the ambient dataplane
"istio.io/dataplane-mode": none
# Annotations to apply to all resources
annotations: {}
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
# If specified, the gateway will act as a network gateway for the given network.
networkGateway: "network1"
# Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
imagePullSecrets: []
# This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
#
# By default, the `podDisruptionBudget` is disabled (set to `{}`),
# which means that no PodDisruptionBudget resource will be created.
#
# To enable the PodDisruptionBudget, configure it by specifying the
# `minAvailable` or `maxUnavailable`. For example, to set the
# minimum number of available replicas to 1, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
#
# Or, to allow a maximum of 1 unavailable replica, you can set:
#
# podDisruptionBudget:
# maxUnavailable: 1
#
# You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
# For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
# unhealthyPodEvictionPolicy: AlwaysAllow
#
# To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
#
# podDisruptionBudget: {}
#
podDisruptionBudget: {}
# Sets the per-pod terminationGracePeriodSeconds setting.
terminationGracePeriodSeconds: 30
# A list of `Volumes` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
# A list of `VolumeMounts` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
# Configure this to a higher priority class in order to make sure your Istio gateway pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""
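
Taken together, these values set up the dmz cluster as a remote/config cluster whose proxies are pointed at an istiod running elsewhere (profile: remote, remotePilotAddress: 192.168.12.131, configCluster: true), with the bundled istio-eastwestgateway marked as the network gateway for network1. In Istio's documented multi-network topology that gateway is paired with a Gateway resource that passes cross-network mTLS traffic straight through on port 15443. The sketch below follows the upstream sample and is an assumption, not part of this diff; in particular the istio: eastwestgateway selector label is assumed.

# Sketch only (assumption; not part of this diff): cross-network passthrough
# server for the east-west gateway, as in Istio's multi-network samples.
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: cross-network-gateway
  namespace: istio-system
spec:
  selector:
    istio: eastwestgateway   # assumes the label the samples put on the east-west gateway pods
  servers:
    - port:
        number: 15443
        name: tls
        protocol: TLS
      tls:
        mode: AUTO_PASSTHROUGH
      hosts:
        - "*.local"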

Some files were not shown because too many files have changed in this diff.