18 Commits

Author SHA1 Message Date
d38a2d2840 update 2024-07-21 09:49:15 -05:00
2b88107a28 Merge branch 'mr-prd' into 'prd' (Mr prd — see merge request developerdurp/homelab!2) 2024-07-21 12:58:33 +00:00
d9be744f17 Mr prd 2024-07-21 12:58:33 +00:00
b1a4779d96 update 2024-07-21 07:41:04 -05:00
54432a447f update 2024-07-21 07:39:08 -05:00
779d3448d5 update 2024-07-21 07:33:18 -05:00
7d9ddc574f update 2024-07-21 06:57:15 -05:00
0b4238217b update ingress 2024-07-21 06:50:32 -05:00
39020882f4 update 2024-07-21 06:28:57 -05:00
39f53751ea update 2024-07-21 06:27:08 -05:00
d686567857 update 2024-07-21 06:25:13 -05:00
74bca2946b update 2024-07-21 06:23:44 -05:00
1972f4a965 add metallb 2024-07-21 06:21:02 -05:00
b4f11ea722 update for ollama url 2024-07-20 09:22:12 -05:00
004990a4ca update 2024-06-23 12:15:12 -05:00
1481dbe107 Update Chart 2024-05-29 11:29:48 +00:00
0414919e05 update 2024-05-25 07:30:32 -05:00
d5ab5c4671 move all to prd 2024-05-11 06:34:17 -05:00
171 changed files with 809 additions and 2953 deletions

View File

@@ -9,6 +9,6 @@ appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 6.11.1
version: 6.7.11

View File

@@ -1,16 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vault
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/vault
targetRevision: prd
path: argocd
destination:
namespace: vault
namespace: argocd
name: in-cluster
syncPolicy:
automated:
@@ -18,4 +18,3 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
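
For reference, this is how the argocd Application reads once the hunks above are applied, re-indented here because the diff view drops leading whitespace (a sketch assembled from the diff, not a verbatim copy of the file):

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argocd
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://gitlab.com/developerdurp/homelab.git
    targetRevision: prd
    path: argocd
  destination:
    namespace: argocd
    name: in-cluster
  syncPolicy:
    automated:
      prune: true      # assumed; not visible in the hunks above, but shown on the removed infra/argocd Application later in this diff
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

Every other Application touched in this merge follows the same pattern, differing only in name, path, and destination namespace.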

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/authentik
targetRevision: prd
path: authentik
destination:
namespace: authentik
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/bitwarden
targetRevision: prd
path: bitwarden
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/cert-manager
targetRevision: prd
path: cert-manager
destination:
namespace: cert-manager
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/crossplane
targetRevision: prd
path: crossplane
destination:
namespace: crossplane
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpapi
targetRevision: prd
path: durpapi
destination:
namespace: durpapi
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpot
targetRevision: prd
path: durpot
destination:
namespace: durpot
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-dns
targetRevision: prd
path: external-dns
destination:
namespace: external-dns
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-secrets
targetRevision: prd
path: external-secrets
destination:
namespace: external-secrets
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/gatekeeper
targetRevision: prd
path: gatekeeper
destination:
namespace: gatekeeper
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/gitlab-runner
targetRevision: prd
path: gitlab-runner
destination:
namespace: gitlab-runner
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/heimdall
targetRevision: prd
path: heimdall
destination:
namespace: heimdall
name: in-cluster

View File

@@ -0,0 +1,36 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`argocd.internal.prd.durp.info`)
middlewares:
- name: internal-only
namespace: traefik
kind: Rule
services:
- name: argocd-server
port: 443
scheme: https
tls:
secretName: argocd-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: argocd-tls
spec:
secretName: argocd-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "argocd.internal.prd.durp.info"
dnsNames:
- "argocd.internal.prd.durp.info"

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/krakend
targetRevision: prd
path: krakend
destination:
namespace: krakend
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kube-prometheus-stack
targetRevision: prd
path: kube-prometheus-stack
destination:
namespace: kube-prometheus-stack
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/kubeclarity
targetRevision: prd
path: kubeclarity
destination:
namespace: kubeclarity
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/littlelink
targetRevision: prd
path: littlelink
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/longhorn
targetRevision: prd
path: longhorn
destination:
namespace: longhorn-system
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/metallb-system
targetRevision: prd
path: metallb-system
destination:
namespace: metallb-system
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/nfs-client
targetRevision: prd
path: nfs-client
directory:
recurse: true
destination:

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/open-webui
targetRevision: prd
path: open-webui
destination:
namespace: open-webui
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/traefik
targetRevision: prd
path: traefik
destination:
namespace: traefik
name: in-cluster

View File

@@ -7,8 +7,8 @@ spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/uptimekuma
targetRevision: prd
path: uptimekuma
directory:
recurse: true
destination:

View File

@@ -1,16 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: traefik
name: vault
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/traefik
targetRevision: prd
path: vault
destination:
namespace: traefik
namespace: vault
name: in-cluster
syncPolicy:
automated:
@@ -18,4 +18,8 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle

View File

@@ -33,13 +33,13 @@ argo-cd:
cm:
create: true
annotations: {}
url: https://argocd.internal.durp.info
url: https://argocd.internal.prd.durp.info
oidc.tls.insecure.skip.verify: "true"
dex.config: |
connectors:
- config:
issuer: https://authentik.durp.info/application/o/argocd/
clientID: dbb8ffc06104fb6e7fac3e4ae7fafb1d90437625
issuer: https://authentik.prd.durp.info/application/o/argocd/
clientID: lKuMgyYaOlQMNAUSjsRVYgkwZG9UT6CeFWeTLAcl
clientSecret: $client-secret:clientSecret
insecureEnableGroups: true
scopes:

View File

@@ -9,4 +9,4 @@ appVersion: "1.16.0"
dependencies:
- name: authentik
repository: https://charts.goauthentik.io
version: 2024.8.3
version: 2024.4.1

View File

@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
- match: Host(`authentik.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: authentik-server
@@ -25,9 +25,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "authentik.durp.info"
commonName: "authentik.prd.durp.info"
dnsNames:
- "authentik.durp.info"
- "authentik.prd.durp.info"
---
@@ -36,7 +36,7 @@ apiVersion: v1
metadata:
name: authentik-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: authentik.durp.info
external-dns.alpha.kubernetes.io/hostname: authentik.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info

View File

@@ -1,6 +1,8 @@
authentik:
global:
env:
- name: AUTHENTIK_REDIS__DB
value: "1"
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
@@ -26,17 +28,22 @@ authentik:
server:
name: server
replicas: 3
worker:
replicas: 3
postgresql:
enabled: true
image:
registry: registry.internal.durp.info
repository: bitnami/postgresql
pullPolicy: Always
postgresqlUsername: "authentik"
postgresqlDatabase: "authentik"
auth:
username: "authentik"
existingSecret: db-pass
secretKeys:
adminPasswordKey: dbpass
userPasswordKey: dbpass
#postgresqlUsername: "authentik"
#postgresqlDatabase: "authentik"
#existingSecret: db-pass
persistence:
enabled: true
storageClass: longhorn
@@ -44,9 +51,6 @@ authentik:
- ReadWriteMany
redis:
enabled: true
master:
persistence:
enabled: false
image:
registry: registry.internal.durp.info
repository: bitnami/redis
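
The new auth block expects a db-pass Secret with a dbpass key to already exist in the authentik namespace; it is not created anywhere in this diff. Given the External Secrets and Vault setup used elsewhere in this repo, it would plausibly be provisioned with an ExternalSecret like the following (illustrative only; the Vault path and property name are assumptions):

apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: authentik-db-pass
spec:
  secretStoreRef:
    name: vault
    kind: ClusterSecretStore
  target:
    name: db-pass
  data:
    - secretKey: dbpass
      remoteRef:
        key: secrets/authentik/postgresql   # hypothetical Vault path
        property: dbpass                     # hypothetical property name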

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: bitwarden
image: registry.internal.durp.info/vaultwarden/server:1.32.7
image: registry.internal.durp.info/vaultwarden/server:1.30.5
imagePullPolicy: Always
volumeMounts:
- name: bitwarden-pvc
@@ -28,7 +28,7 @@ spec:
containerPort: 80
env:
- name: SIGNUPS_ALLOWED
value: "FALSE"
value: "TRUE"
- name: INVITATIONS_ALLOWED
value: "FALSE"
- name: WEBSOCKET_ENABLED
@@ -39,7 +39,7 @@ spec:
value: "80"
- name: ROCKET_WORKERS
value: "10"
- name: SECRET_USERNAME
- name: ADMIN_TOKEN
valueFrom:
secretKeyRef:
name: bitwarden-secret

View File

@@ -0,0 +1,63 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: bitwarden
port: 80
tls:
secretName: bitwarden-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: bitwarden-tls
spec:
secretName: bitwarden-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "bitwarden.prd.durp.info"
dnsNames:
- "bitwarden.prd.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: bitwarden-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: bitwarden.prd.durp.info
spec:
type: ExternalName
externalName:.prd.durp.info
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-admin-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.prd.durp.info`) && PathPrefix(`/admin`)
kind: Rule
middlewares:
- name: whitelist
namespace: traefik
services:
- name: bitwarden
port: 80
tls:
secretName: bitwarden-tls

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.15.3
version: 1.*.*

View File

@@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-cluster-issuer
spec:
selfSigned: {}
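
Nothing in this merge consumes the new self-signed issuers yet. A Certificate would reference them the same way letsencrypt-production is referenced throughout this diff, for example (hostnames and resource names here are placeholders):

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-internal-tls
spec:
  secretName: example-internal-tls
  issuerRef:
    name: selfsigned-cluster-issuer
    kind: ClusterIssuer
  commonName: "example.internal.prd.durp.info"
  dnsNames:
    - "example.internal.prd.durp.info"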

View File

@@ -9,4 +9,4 @@ appVersion: "1.16.0"
dependencies:
- name: crossplane
repository: https://charts.crossplane.io/stable
version: 1.17.1
version: 1.16.0

View File

@@ -3,7 +3,7 @@ kind: Provider
metadata:
name: provider-gitlab
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-gitlab:v0.5.0
package: xpkg.upbound.io/crossplane-contrib/provider-gitlab:v0.7.0
---
apiVersion: external-secrets.io/v1beta1

crossplane/values.yaml (new file, 186 lines)
View File

@@ -0,0 +1,186 @@
# helm-docs renders these comments into markdown. Use markdown formatting where
# appropriate.
#
# -- The number of Crossplane pod `replicas` to deploy.
replicas: 1
# -- The deployment strategy for the Crossplane and RBAC Manager pods.
deploymentStrategy: RollingUpdate
image:
# -- Repository for the Crossplane pod image.
repository: xpkg.upbound.io/crossplane/crossplane
# -- The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`.
tag: ""
# -- The image pull policy used for Crossplane and RBAC Manager pods.
pullPolicy: IfNotPresent
# -- Add `nodeSelectors` to the Crossplane pod deployment.
nodeSelector: {}
# -- Add `tolerations` to the Crossplane pod deployment.
tolerations: []
# -- Add `affinities` to the Crossplane pod deployment.
affinity: {}
# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`.
hostNetwork: false
# -- Specify the `dnsPolicy` to be used by the Crossplane pod.
dnsPolicy: ""
# -- Add custom `labels` to the Crossplane pod deployment.
customLabels: {}
# -- Add custom `annotations` to the Crossplane pod deployment.
customAnnotations: {}
serviceAccount:
# -- Add custom `annotations` to the Crossplane ServiceAccount.
customAnnotations: {}
# -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the Crossplane pod.
leaderElection: true
# -- Add custom arguments to the Crossplane pod.
args: []
provider:
# -- A list of Provider packages to install.
packages: []
configuration:
# -- A list of Configuration packages to install.
packages: []
function:
# -- A list of Function packages to install
packages: []
# -- The imagePullSecret names to add to the Crossplane ServiceAccount.
imagePullSecrets: []
registryCaBundleConfig:
# -- The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates.
name: ""
# -- The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates.
key: ""
service:
# -- Configure annotations on the service object. Only enabled when webhooks.enabled = true
customAnnotations: {}
webhooks:
# -- Enable webhooks for Crossplane and installed Provider packages.
enabled: true
rbacManager:
# -- Deploy the RBAC Manager pod and its required roles.
deploy: true
# -- Don't install aggregated Crossplane ClusterRoles.
skipAggregatedClusterRoles: false
# -- The number of RBAC Manager pod `replicas` to deploy.
replicas: 1
# -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the RBAC Manager pod.
leaderElection: true
# -- Add custom arguments to the RBAC Manager pod.
args: []
# -- Add `nodeSelectors` to the RBAC Manager pod deployment.
nodeSelector: {}
# -- Add `tolerations` to the RBAC Manager pod deployment.
tolerations: []
# -- Add `affinities` to the RBAC Manager pod deployment.
affinity: {}
# -- The PriorityClass name to apply to the Crossplane and RBAC Manager pods.
priorityClassName: ""
resourcesCrossplane:
limits:
# -- CPU resource limits for the Crossplane pod.
cpu: 500m
# -- Memory resource limits for the Crossplane pod.
memory: 1024Mi
requests:
# -- CPU resource requests for the Crossplane pod.
cpu: 100m
# -- Memory resource requests for the Crossplane pod.
memory: 256Mi
securityContextCrossplane:
# -- The user ID used by the Crossplane pod.
runAsUser: 65532
# -- The group ID used by the Crossplane pod.
runAsGroup: 65532
# -- Enable `allowPrivilegeEscalation` for the Crossplane pod.
allowPrivilegeEscalation: false
# -- Set the Crossplane pod root file system as read-only.
readOnlyRootFilesystem: true
packageCache:
# -- Set to `Memory` to hold the package cache in a RAM backed file system. Useful for Crossplane development.
medium: ""
# -- The size limit for the package cache. If medium is `Memory` the `sizeLimit` can't exceed Node memory.
sizeLimit: 20Mi
# -- The name of a PersistentVolumeClaim to use as the package cache. Disables the default package cache `emptyDir` Volume.
pvc: ""
# -- The name of a ConfigMap to use as the package cache. Disables the default package cache `emptyDir` Volume.
configMap: ""
resourcesRBACManager:
limits:
# -- CPU resource limits for the RBAC Manager pod.
cpu: 100m
# -- Memory resource limits for the RBAC Manager pod.
memory: 512Mi
requests:
# -- CPU resource requests for the RBAC Manager pod.
cpu: 100m
# -- Memory resource requests for the RBAC Manager pod.
memory: 256Mi
securityContextRBACManager:
# -- The user ID used by the RBAC Manager pod.
runAsUser: 65532
# -- The group ID used by the RBAC Manager pod.
runAsGroup: 65532
# -- Enable `allowPrivilegeEscalation` for the RBAC Manager pod.
allowPrivilegeEscalation: false
# -- Set the RBAC Manager pod root file system as read-only.
readOnlyRootFilesystem: true
metrics:
# -- Enable Prometheus path, port and scrape annotations and expose port 8080 for both the Crossplane and RBAC Manager pods.
enabled: false
# -- Add custom environmental variables to the Crossplane pod deployment.
# Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`.
extraEnvVarsCrossplane: {}
# -- Add custom environmental variables to the RBAC Manager pod deployment.
# Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`.
extraEnvVarsRBACManager: {}
# -- Add a custom `securityContext` to the Crossplane pod.
podSecurityContextCrossplane: {}
# -- Add a custom `securityContext` to the RBAC Manager pod.
podSecurityContextRBACManager: {}
# -- Add custom `volumes` to the Crossplane pod.
extraVolumesCrossplane: {}
# -- Add custom `volumeMounts` to the Crossplane pod.
extraVolumeMountsCrossplane: {}
# -- To add arbitrary Kubernetes Objects during a Helm Install
extraObjects: []
# - apiVersion: pkg.crossplane.io/v1alpha1
# kind: ControllerConfig
# metadata:
# name: aws-config
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789101:role/example
# helm.sh/hook: post-install
# spec:
# podSecurityContext:
# fsGroup: 2000

View File

@@ -1,101 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: ollama-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: ollama-secret
data:
- secretKey: users
remoteRef:
key: secrets/internalproxy/ollama
property: users
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: ollama-basic-auth
spec:
basicAuth:
secret: ollama-secret
---
apiVersion: v1
kind: Service
metadata:
name: ollama
spec:
ports:
- name: app
port: 11435
protocol: TCP
targetPort: 11435
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ollama
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 11435
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: ollama-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`ollama.durp.info`) && PathPrefix(`/`)
middlewares:
- name: ollama-basic-auth
kind: Rule
services:
- name: ollama
port: 11435
tls:
secretName: ollama-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ollama-tls
spec:
secretName: ollama-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "ollama.durp.info"
dnsNames:
- "ollama.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: ollama-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: ollama.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -1,13 +1,11 @@
apiVersion: v2
name: durpapi
description: A Helm chart for Kubernetes
type: application
version: 0.1.0-dev0184
name: durpapi
appVersion: 0.1.0
dependencies:
- condition: postgresql.enabled
name: postgresql
version: 12.5.*
repository: https://charts.bitnami.com/bitnami
name: postgresql
version: 0.1.0-dev0184
apiVersion: v2
type: application

View File

@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host("api.durp.info") && PathPrefix(`/api`)
- match: Host("api.prd.durp.info") && PathPrefix(`/api`)
kind: Rule
middlewares:
- name: jwt
@@ -24,7 +24,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host("api.durp.info") && PathPrefix(`/swagger`)
- match: Host("api.prd.durp.info") && PathPrefix(`/swagger`)
kind: Rule
services:
- name: "durpapi-service"
@@ -41,4 +41,4 @@ spec:
jwt:
Required: true
Keys:
- https://authentik.durp.info/application/o/api/jwks
- https://authentik.prd.durp.info/application/o/api/jwks/
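
The jwt block at the end of this hunk is the tail of a Traefik plugin Middleware (the Traefik values elsewhere in this diff load the traefik-jwt-plugin module). Reassembled with indentation, it would look roughly like this (the metadata is not shown in the hunk, so the name is inferred from the "- name: jwt" middleware reference above, and the nesting under spec.plugin is assumed from how Traefik plugin middlewares are configured):

apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: jwt
spec:
  plugin:
    jwt:
      Required: true
      Keys:
        - https://authentik.prd.durp.info/application/o/api/jwks/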

View File

@@ -10,15 +10,15 @@ deployment:
probe:
readiness:
httpGet:
path: /health/gethealth
path: /api/health/gethealth
port: 8080
liveness:
httpGet:
path: /health/gethealth
path: /api/health/gethealth
port: 8080
startup:
httpGet:
path: /health/gethealth
path: /api/health/gethealth
port: 8080
service:
type: ClusterIP

View File

@@ -9,4 +9,4 @@ appVersion: 0.0.1
dependencies:
- name: external-dns
repository: https://charts.bitnami.com/bitnami
version: 8.3.8
version: 6.20.3

View File

@@ -4,7 +4,7 @@ external-dns:
image:
pullPolicy: Always
txtPrefix: "prd-"
sources:
- service

View File

@@ -8,5 +8,5 @@ appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.10.4
version: 0.8.1

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: gatekeeper
repository: https://open-policy-agent.github.io/gatekeeper/charts
version: 3.17.1
version: 3.14.0

gatekeeper/values.yaml (new file, 277 lines)
View File

@@ -0,0 +1,277 @@
gatekeeper:
replicas: 3
revisionHistoryLimit: 10
auditInterval: 60
metricsBackends: ["prometheus"]
auditMatchKindOnly: false
constraintViolationsLimit: 20
auditFromCache: false
disableMutation: false
disableValidatingWebhook: false
validatingWebhookName: gatekeeper-validating-webhook-configuration
validatingWebhookTimeoutSeconds: 3
validatingWebhookFailurePolicy: Ignore
validatingWebhookAnnotations: {}
validatingWebhookExemptNamespacesLabels: {}
validatingWebhookObjectSelector: {}
validatingWebhookCheckIgnoreFailurePolicy: Fail
validatingWebhookCustomRules: {}
validatingWebhookURL: null
enableDeleteOperations: false
enableExternalData: true
enableGeneratorResourceExpansion: true
enableTLSHealthcheck: false
maxServingThreads: -1
mutatingWebhookName: gatekeeper-mutating-webhook-configuration
mutatingWebhookFailurePolicy: Ignore
mutatingWebhookReinvocationPolicy: Never
mutatingWebhookAnnotations: {}
mutatingWebhookExemptNamespacesLabels: {}
mutatingWebhookObjectSelector: {}
mutatingWebhookTimeoutSeconds: 1
mutatingWebhookCustomRules: {}
mutatingWebhookURL: null
mutationAnnotations: false
auditChunkSize: 500
logLevel: INFO
logDenies: false
logMutations: false
emitAdmissionEvents: false
emitAuditEvents: false
admissionEventsInvolvedNamespace: false
auditEventsInvolvedNamespace: false
resourceQuota: true
externaldataProviderResponseCacheTTL: 3m
image:
repository: openpolicyagent/gatekeeper
crdRepository: openpolicyagent/gatekeeper-crds
release: v3.15.0-beta.0
pullPolicy: Always
pullSecrets: []
preInstall:
crdRepository:
image:
repository: null
tag: v3.15.0-beta.0
postUpgrade:
labelNamespace:
enabled: false
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
extraNamespaces: []
podSecurity: ["pod-security.kubernetes.io/audit=restricted",
"pod-security.kubernetes.io/audit-version=latest",
"pod-security.kubernetes.io/warn=restricted",
"pod-security.kubernetes.io/warn-version=latest",
"pod-security.kubernetes.io/enforce=restricted",
"pod-security.kubernetes.io/enforce-version=v1.24"]
extraAnnotations: {}
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
postInstall:
labelNamespace:
enabled: true
extraRules: []
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
extraNamespaces: []
podSecurity: ["pod-security.kubernetes.io/audit=restricted",
"pod-security.kubernetes.io/audit-version=latest",
"pod-security.kubernetes.io/warn=restricted",
"pod-security.kubernetes.io/warn-version=latest",
"pod-security.kubernetes.io/enforce=restricted",
"pod-security.kubernetes.io/enforce-version=v1.24"]
extraAnnotations: {}
priorityClassName: ""
probeWebhook:
enabled: true
image:
repository: curlimages/curl
tag: 7.83.1
pullPolicy: IfNotPresent
pullSecrets: []
waitTimeout: 60
httpTimeout: 2
insecureHTTPS: false
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
preUninstall:
deleteWebhookConfigurations:
extraRules: []
enabled: false
image:
repository: openpolicyagent/gatekeeper-crds
tag: v3.15.0-beta.0
pullPolicy: IfNotPresent
pullSecrets: []
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podAnnotations: {}
auditPodAnnotations: {}
podLabels: {}
podCountLimit: "100"
secretAnnotations: {}
enableRuntimeDefaultSeccompProfile: true
controllerManager:
exemptNamespaces: []
exemptNamespacePrefixes: []
hostNetwork: false
dnsPolicy: ClusterFirst
port: 8443
metricsPort: 8888
healthPort: 9090
readinessTimeout: 1
livenessTimeout: 1
priorityClassName: system-cluster-critical
disableCertRotation: false
tlsMinVersion: 1.3
clientCertName: ""
strategyType: RollingUpdate
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: gatekeeper.sh/operation
operator: In
values:
- webhook
topologyKey: kubernetes.io/hostname
weight: 100
topologySpreadConstraints: []
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podSecurityContext:
fsGroup: 999
supplementalGroups:
- 999
extraRules: []
networkPolicy:
enabled: false
ingress: { }
# - from:
# - ipBlock:
# cidr: 0.0.0.0/0
audit:
enablePubsub: false
connection: audit-connection
channel: audit-channel
hostNetwork: false
dnsPolicy: ClusterFirst
metricsPort: 8888
healthPort: 9090
readinessTimeout: 1
livenessTimeout: 1
priorityClassName: system-cluster-critical
disableCertRotation: false
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 999
runAsNonRoot: true
runAsUser: 1000
podSecurityContext:
fsGroup: 999
supplementalGroups:
- 999
writeToRAMDisk: false
extraRules: []
crds:
affinity: {}
tolerations: []
nodeSelector: {kubernetes.io/os: linux}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
pdb:
controllerManager:
minAvailable: 1
service: {}
disabledBuiltins: ["{http.send}"]
psp:
enabled: true
upgradeCRDs:
enabled: true
extraRules: []
priorityClassName: ""
rbac:
create: true
externalCertInjection:
enabled: false
secretName: gatekeeper-webhook-server-cert

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: gitlab-runner
repository: https://charts.gitlab.io/
version: 0.69.0
version: 0.43.0

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: heimdall
repository: https://djjudas21.github.io/charts/
version: 8.5.4
version: 8.5.2

View File

@@ -7,7 +7,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`heimdall.durp.info`) && PathPrefix(`/`)
- match: Host(`heimdall.prd.durp.info`) && PathPrefix(`/`)
middlewares:
- name: authentik-proxy-provider
namespace: traefik
@@ -15,7 +15,7 @@ spec:
services:
- name: heimdall
port: 80
- match: Host(`heimdall.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
- match: Host(`heimdall.prd.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule
services:
- name: ak-outpost-authentik-embedded-outpost
@@ -35,9 +35,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "heimdall.durp.info"
commonName: "heimdall.prd.durp.info"
dnsNames:
- "heimdall.durp.info"
- "heimdall.prd.durp.info"
---
@@ -46,7 +46,7 @@ apiVersion: v1
metadata:
name: heimdall-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: heimdall.durp.info
external-dns.alpha.kubernetes.io/hostname: heimdall.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info
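
The heimdall route is protected by an authentik-proxy-provider middleware from the traefik namespace, which is not included in this diff. With authentik's embedded outpost (routed above at /outpost.goauthentik.io), that middleware is typically a forwardAuth definition along these lines (illustrative; the service address and response-header list are assumptions based on authentik's standard Traefik integration):

apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: authentik-proxy-provider
  namespace: traefik
spec:
  forwardAuth:
    address: http://ak-outpost-authentik-embedded-outpost.authentik:9000/outpost.goauthentik.io/auth/traefik
    trustForwardHeader: true
    authResponseHeaders:
      - X-authentik-username
      - X-authentik-groups
      - X-authentik-email
      - X-authentik-name
      - X-authentik-uid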

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: argocd
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 6.11.1

View File

@@ -1,64 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: infra/argocd
destination:
namespace: argocd
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
#apiVersion: external-secrets.io/v1beta1
#kind: ExternalSecret
#metadata:
# name: vault-argocd
# labels:
# app.kubernetes.io/part-of: argocd
#spec:
# secretStoreRef:
# name: vault
# kind: ClusterSecretStore
# target:
# name: client-secret
# data:
# - secretKey: clientSecret
# remoteRef:
# key: secrets/argocd/authentik
# property: clientsecret
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`argocd.infra.durp.info`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: argocd-server
port: 443
scheme: https
tls:
secretName: argocd-tls

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: longhorn-system
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: longhorn
repository: https://charts.longhorn.io
version: 1.7.2

View File

@@ -1,41 +0,0 @@
#apiVersion: traefik.containo.us/v1alpha1
#kind: IngressRoute
#metadata:
# name: longhorn-ingress
#spec:
# entryPoints:
# - websecure
# routes:
# - match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/`)
# middlewares:
# - name: whitelist
# namespace: traefik
# - name: authentik-proxy-provider
# namespace: traefik
# kind: Rule
# services:
# - name: longhorn-frontend
# port: 80
# - match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
# kind: Rule
# services:
# - name: ak-outpost-authentik-embedded-outpost
# namespace: authentik
# port: 9000
# tls:
# secretName: longhorn-tls
#
#---
#
#apiVersion: cert-manager.io/v1
#kind: Certificate
#metadata:
# name: longhorn-tls
#spec:
# secretName: longhorn-tls
# issuerRef:
# name: letsencrypt-production
# kind: ClusterIssuer
# commonName: "longhorn.internal.durp.info"
# dnsNames:
# - "longhorn.internal.durp.info"

View File

@@ -1,23 +0,0 @@
#apiVersion: external-secrets.io/v1beta1
#kind: ExternalSecret
#metadata:
# name: external-longhorn-backup-token-secret
#spec:
# secretStoreRef:
# name: vault
# kind: ClusterSecretStore
# target:
# name: longhorn-backup-token-secret
# data:
# - secretKey: AWS_ACCESS_KEY_ID
# remoteRef:
# key: secrets/longhorn/backup
# property: AWS_ACCESS_KEY_ID
# - secretKey: AWS_ENDPOINTS
# remoteRef:
# key: secrets/longhorn/backup
# property: AWS_ENDPOINTS
# - secretKey: AWS_SECRET_ACCESS_KEY
# remoteRef:
# key: secrets/longhorn/backup
# property: AWS_SECRET_ACCESS_KEY

View File

@@ -1,192 +0,0 @@
longhorn:
global:
cattle:
systemDefaultRegistry: ""
image:
longhorn:
engine:
repository: longhornio/longhorn-engine
manager:
repository: longhornio/longhorn-manager
ui:
repository: longhornio/longhorn-ui
instanceManager:
repository: longhornio/longhorn-instance-manager
shareManager:
repository: longhornio/longhorn-share-manager
backingImageManager:
repository: longhornio/backing-image-manager
csi:
attacher:
repository: longhornio/csi-attacher
provisioner:
repository: longhornio/csi-provisioner
nodeDriverRegistrar:
repository: longhornio/csi-node-driver-registrar
resizer:
repository: longhornio/csi-resizer
snapshotter:
repository: longhornio/csi-snapshotter
pullPolicy: Always
service:
ui:
type: ClusterIP
nodePort: null
manager:
type: ClusterIP
nodePort: ""
loadBalancerIP: ""
loadBalancerSourceRanges: ""
persistence:
defaultClass: true
defaultFsType: ext4
defaultClassReplicaCount: 3
defaultDataLocality: disabled # best-effort otherwise
reclaimPolicy: Retain
migratable: false
recurringJobSelector:
enable: true
jobList: '[
{
"name":"backup",
"task":"backup",
"cron":"0 0 * * ?",
"retain":24
}
]'
backingImage:
enable: false
name: ~
dataSourceType: ~
dataSourceParameters: ~
expectedChecksum: ~
csi:
kubeletRootDir: ~
attacherReplicaCount: ~
provisionerReplicaCount: ~
resizerReplicaCount: ~
snapshotterReplicaCount: ~
defaultSettings:
backupTarget: S3://longhorn-infra@us-east-1/
backupTargetCredentialSecret: longhorn-backup-token-secret
allowRecurringJobWhileVolumeDetached: ~
createDefaultDiskLabeledNodes: ~
defaultDataPath: ~
defaultDataLocality: ~
replicaSoftAntiAffinity: ~
replicaAutoBalance: ~
storageOverProvisioningPercentage: ~
storageMinimalAvailablePercentage: ~
upgradeChecker: ~
defaultReplicaCount: ~
defaultLonghornStaticStorageClass: longhorn
backupstorePollInterval: ~
taintToleration: ~
systemManagedComponentsNodeSelector: ~
priorityClass: ~
autoSalvage: ~
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
disableSchedulingOnCordonedNode: ~
replicaZoneSoftAntiAffinity: ~
nodeDownPodDeletionPolicy: ~
allowNodeDrainWithLastHealthyReplica: ~
mkfsExt4Parameters: ~
disableReplicaRebuild: ~
replicaReplenishmentWaitInterval: ~
concurrentReplicaRebuildPerNodeLimit: ~
disableRevisionCounter: ~
systemManagedPodsImagePullPolicy: ~
allowVolumeCreationWithDegradedAvailability: ~
autoCleanupSystemGeneratedSnapshot: ~
concurrentAutomaticEngineUpgradePerNodeLimit: ~
backingImageCleanupWaitInterval: ~
backingImageRecoveryWaitInterval: ~
guaranteedEngineManagerCPU: ~
guaranteedReplicaManagerCPU: ~
kubernetesClusterAutoscalerEnabled: ~
orphanAutoDeletion: ~
storageNetwork: ~
privateRegistry:
createSecret: ~
registryUrl: ~
registryUser: ~
registryPasswd: ~
registrySecret: ~
longhornManager:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornDriver:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
longhornUI:
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
#
ingress:
enabled: false
## Specify override namespace, specifically this is useful for using longhorn as sub-chart
## and its release namespace is not the `longhorn-system`
namespaceOverride: ""
# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
annotations: {}
serviceAccount:
# Annotations to add to the service account
annotations: {}

View File

@@ -1,17 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cheap
spec:
addresses:
- 192.168.12.130-192.168.12.140
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: pool
namespace: metallb-system
spec:
ipAddressPools:
- cheap

View File

@@ -1,47 +0,0 @@
traefik:
image:
registry: registry.durp.info
repository: traefik
pullPolicy: Always
deployment:
replicas: 3
revisionHistoryLimit: 1
ingressRoute:
dashboard:
enabled: true
additionalArguments:
- "--serversTransport.insecureSkipVerify=true"
- "--log.level=DEBUG"
- --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin
- --experimental.plugins.jwt.version=v0.7.0
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Pods
value: 1
periodSeconds: 60
# -- [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for `traefik` container.
resources:
requests:
cpu: "100m"
memory: "512Mi"
limits:
memory: "512Mi"

View File

@@ -1,12 +0,0 @@
apiVersion: v2
name: vault
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: vault
repository: https://helm.releases.hashicorp.com
version: 0.29.1

View File

@@ -1,100 +0,0 @@
vault:
global:
enabled: true
tlsDisable: false
resources:
requests:
memory: 256Mi
cpu: 250m
limits:
memory: 256Mi
cpu: 250m
server:
image:
repository: "hashicorp/vault"
# These Resource Limits are in line with node requirements in the
# Vault Reference Architecture for a Small Cluster
#resources:
# requests:
# memory: 8Gi
# cpu: 2000m
# limits:
# memory: 16Gi
# cpu: 2000m
# For HA configuration and because we need to manually init the vault,
# we need to define custom readiness/liveness Probe settings
readinessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
livenessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true"
initialDelaySeconds: 60
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars:
VAULT_CACERT: /vault/userconfig/vault-server-tls/vault.ca
volumes:
- name: userconfig-vault-server-tls
secret:
defaultMode: 420
secretName: vault-server-tls
volumeMounts:
- mountPath: /vault/userconfig/vault-server-tls
name: userconfig-vault-server-tls
readOnly: true
# This configures the Vault Statefulset to create a PVC for audit logs.
# See https://www.vaultproject.io/docs/audit/index.html to know more
auditStorage:
enabled: true
standalone:
enabled: false
# Run Vault in "HA" mode.
ha:
enabled: true
replicas: 3
raft:
enabled: true
setNodeId: true
config: |
ui = true
cluster_name = "vault-integrated-storage"
listener "tcp" {
address = "[::]:8200"
cluster_address = "[::]:8201"
tls_cert_file = "/vault/userconfig/vault-server-tls/vault.crt"
tls_key_file = "/vault/userconfig/vault-server-tls/vault.key"
tls_client_ca_file = "/vault/userconfig/vault-server-tls/vault.ca"
}
storage "raft" {
path = "/vault/data"
retry_join {
leader_api_addr = "https://vault-0.vault-internal:8200"
leader_ca_cert_file = "/vault/userconfig/vault-server-tls/vault.ca"
leader_client_cert_file = "/vault/userconfig/vault-server-tls/vault.crt"
leader_client_key_file = "/vault/userconfig/vault-server-tls/vault.ca"
}
retry_join {
leader_api_addr = "https://vault-1.vault-internal:8200"
leader_ca_cert_file = "/vault/userconfig/vault-server-tls/vault.ca"
leader_client_cert_file = "/vault/userconfig/vault-server-tls/vault.crt"
leader_client_key_file = "/vault/userconfig/vault-server-tls/vault.ca"
}
}
# Vault UI
ui:
enabled: true
serviceType: "LoadBalancer"
serviceNodePort: null
externalPort: 8200

View File

@@ -7,9 +7,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "api.durp.info"
commonName: "api.prd.durp.info"
dnsNames:
- "api.durp.info"
- "api.prd.durp.info"
---
@@ -21,7 +21,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`api.durp.info`) && PathPrefix(`/`)
- match: Host(`api.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: krakend-service
@@ -37,10 +37,10 @@ apiVersion: v1
metadata:
name: api-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: api.durp.info
external-dns.alpha.kubernetes.io/hostname: api.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info
---
@@ -49,7 +49,7 @@ apiVersion: v1
metadata:
name: api-developer-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: developer.durp.info
external-dns.alpha.kubernetes.io/hostname: developer.prd.durp.info
external-dns.alpha.kubernetes.io/cloudflare-proxied: "false"
spec:
type: ExternalName

View File

@@ -9,4 +9,4 @@ appVersion: "1.16.0"
dependencies:
- name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 63.1.0
version: 40.5.0

View File

@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`grafana.durp.info`) && PathPrefix(`/`)
- match: Host(`grafana.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: grafana
@@ -25,9 +25,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "grafana.durp.info"
commonName: "grafana.prd.durp.info"
dnsNames:
- "grafana.durp.info"
- "grafana.prd.durp.info"
---
@@ -39,7 +39,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`alertmanager.durp.info`) && PathPrefix(`/`)
- match: Host(`alertmanager.prd.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
@@ -63,9 +63,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "alertmanager.durp.info"
commonName: "alertmanager.prd.durp.info"
dnsNames:
- "alertmanager.durp.info"
- "alertmanager.prd.durp.info"
---
@@ -74,7 +74,7 @@ apiVersion: v1
metadata:
name: grafana-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: grafana.durp.info
external-dns.alpha.kubernetes.io/hostname: grafana.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info

View File

@@ -8,4 +8,4 @@ appVersion: 0.0.1
dependencies:
- name: kubeclarity
repository: https://openclarity.github.io/kubeclarity
version: 2.23.3
version: 2.22.0

View File

@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`kubeclarity.durp.info`) && PathPrefix(`/`)
- match: Host(`kubeclarity.prd.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
@@ -30,9 +30,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "kubeclarity.durp.info"
commonName: "kubeclarity.prd.durp.info"
dnsNames:
- "kubeclarity.durp.info"
- "kubeclarity.prd.durp.info"
---
@@ -41,7 +41,7 @@ apiVersion: v1
metadata:
name: kubeclarity-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: kubeclarity.durp.info
external-dns.alpha.kubernetes.io/hostname: kubeclarity.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info

View File

@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`links.durp.info`) && PathPrefix(`/`)
- match: Host(`links.prd.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: littlelink
@@ -25,9 +25,9 @@ spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "links.durp.info"
commonName: "links.prd.durp.info"
dnsNames:
- "links.durp.info"
- "links.prd.durp.info"
---
@@ -36,7 +36,7 @@ apiVersion: v1
metadata:
name: links-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: links.durp.info
external-dns.alpha.kubernetes.io/hostname: links.prd.durp.info
spec:
type: ExternalName
externalName: durp.info
externalName:.prd.durp.info

Some files were not shown because too many files have changed in this diff.