28 Commits

Author SHA1 Message Date
c6d963d635 update 2025-03-18 06:25:39 -05:00
399fadef3f update 2025-03-18 06:24:54 -05:00
9165bd44a9 update 2025-03-18 06:24:03 -05:00
093bf40127 update 2025-03-18 06:21:58 -05:00
d217345a25 update 2025-03-18 06:20:22 -05:00
3b69fdcf8f update 2025-03-18 06:19:13 -05:00
513298f3be update 2025-03-18 06:16:52 -05:00
4c4f67525d update 2025-03-18 06:15:33 -05:00
39fae09f58 update 2025-03-18 06:15:05 -05:00
22aad9eb54 update 2025-03-18 05:58:53 -05:00
2450688ab8 update 2025-03-18 05:56:39 -05:00
d73f626ae5 update 2025-03-18 05:55:10 -05:00
04e76a3e90 update 2025-03-18 05:06:45 -05:00
2e45f7b899 update 2025-03-17 06:17:20 -05:00
7f3961964d update 2025-03-17 05:35:46 -05:00
813e8a8eb3 update 2025-03-17 05:26:24 -05:00
b2a369b479 update 2025-03-17 05:24:13 -05:00
0907c0fdc8 update 2025-03-17 05:20:34 -05:00
a50b3ca1d4 update 2025-03-17 05:13:50 -05:00
9d45ebf641 update 2025-03-17 05:00:00 -05:00
961e332e71 update 2025-03-17 04:59:19 -05:00
8d63bc4ea9 update 2025-03-16 21:32:29 -05:00
fa64786dcf update 2025-03-16 21:29:37 -05:00
3590ea3840 update 2025-03-16 21:22:07 -05:00
ce6b64840e update 2025-03-16 21:15:28 -05:00
f68327747a update 2025-03-16 13:35:16 -05:00
2df7ebbf4c update 2025-03-16 13:33:00 -05:00
1c63f8028a update 2025-03-16 13:27:48 -05:00
34 changed files with 600 additions and 1135 deletions

View File

@@ -0,0 +1,42 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: infra-cluster
port: 443
tls:
secretName: authentik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: authentik-tls
spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: authentik-tls
commonName: "authentik.durp.info"
dnsNames:
- "authentik.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: authentik-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: authentik.durp.info
spec:
type: ExternalName
externalName: durp.info

View File

@@ -0,0 +1,45 @@
apiVersion: v1
kind: Endpoints
metadata:
name: master-cluster
subsets:
- addresses:
- ip: 192.168.20.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: master-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
name: infra-cluster
subsets:
- addresses:
- ip: 192.168.12.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: infra-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443

View File

@@ -19,7 +19,7 @@ metadata:
name: gitea name: gitea
subsets: subsets:
- addresses: - addresses:
- ip: 192.168.20.200 - ip: 192.168.21.200
ports: ports:
- name: app - name: app
port: 3000 port: 3000
@@ -27,7 +27,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: gitea-ingress name: gitea-ingress

View File

@@ -27,7 +27,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: kasm-ingress name: kasm-ingress

View File

@@ -27,7 +27,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: nexus-ingress name: nexus-ingress

View File

@@ -11,12 +11,12 @@ spec:
data: data:
- secretKey: users - secretKey: users
remoteRef: remoteRef:
key: secrets/internalproxy/ollama key: kv/ollama
property: users property: users
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: Middleware kind: Middleware
metadata: metadata:
name: ollama-basic-auth name: ollama-basic-auth
@@ -56,7 +56,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: ollama-ingress name: ollama-ingress

View File

@@ -0,0 +1,30 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: open-webui-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`open-webui.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: master-cluster
port: 443
tls:
secretName: open-webui-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: open-webui-tls
spec:
secretName: open-webui-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "open-webui.durp.info"
dnsNames:
- "open-webui.durp.info"

View File

@@ -27,7 +27,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: plex-ingress name: plex-ingress

View File

@@ -1,68 +1,68 @@
apiVersion: v1 #apiVersion: v1
kind: Service #kind: Service
metadata: #metadata:
name: redlib # name: redlib
spec: #spec:
ports: # ports:
- name: app # - name: app
port: 8082 # port: 8082
protocol: TCP # protocol: TCP
targetPort: 8082 # targetPort: 8082
clusterIP: None # clusterIP: None
type: ClusterIP # type: ClusterIP
#
--- #---
#
apiVersion: v1 #apiVersion: v1
kind: Endpoints #kind: Endpoints
metadata: #metadata:
name: redlib # name: redlib
subsets: #subsets:
- addresses: #- addresses:
- ip: 192.168.20.200 # - ip: 192.168.20.200
ports: # ports:
- name: app # - name: app
port: 8082 # port: 8082
protocol: TCP # protocol: TCP
#
--- #---
#
apiVersion: traefik.containo.us/v1alpha1 #apiVersion: traefik.io/v1alpha1
kind: IngressRoute #kind: IngressRoute
metadata: #metadata:
name: redlib-ingress # name: redlib-ingress
spec: #spec:
entryPoints: # entryPoints:
- websecure # - websecure
routes: # routes:
- match: Host(`redlib.durp.info`) && PathPrefix(`/`) # - match: Host(`redlib.durp.info`) && PathPrefix(`/`)
middlewares: # middlewares:
- name: authentik-proxy-provider # - name: authentik-proxy-provider
namespace: traefik # namespace: traefik
kind: Rule # kind: Rule
services: # services:
- name: redlib # - name: redlib
port: 8082 # port: 8082
- match: Host(`redlib.durp.info`) && PathPrefix(`/outpost.goauthentik.io`) # - match: Host(`redlib.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule # kind: Rule
services: # services:
- name: ak-outpost-authentik-embedded-outpost # - name: ak-outpost-authentik-embedded-outpost
namespace: authentik # namespace: authentik
port: 9000 # port: 9000
tls: # tls:
secretName: redlib-tls # secretName: redlib-tls
#
--- #---
#
apiVersion: cert-manager.io/v1 #apiVersion: cert-manager.io/v1
kind: Certificate #kind: Certificate
metadata: #metadata:
name: redlib-tls # name: redlib-tls
spec: #spec:
secretName: redlib-tls # secretName: redlib-tls
issuerRef: # issuerRef:
name: letsencrypt-production # name: letsencrypt-production
kind: ClusterIssuer # kind: ClusterIssuer
commonName: "redlib.durp.info" # commonName: "redlib.durp.info"
dnsNames: # dnsNames:
- "redlib.durp.info" # - "redlib.durp.info"

View File

@@ -27,7 +27,7 @@ subsets:
--- ---
apiVersion: traefik.containo.us/v1alpha1 apiVersion: traefik.io/v1alpha1
kind: IngressRoute kind: IngressRoute
metadata: metadata:
name: registry-ingress name: registry-ingress

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault

View File

@@ -1,82 +1,82 @@
apiVersion: v1 #apiVersion: v1
kind: Service #kind: Service
metadata: #metadata:
name: smokeping # name: smokeping
spec: #spec:
ports: # ports:
- name: app # - name: app
port: 81 # port: 81
protocol: TCP # protocol: TCP
targetPort: 81 # targetPort: 81
clusterIP: None # clusterIP: None
type: ClusterIP # type: ClusterIP
#
--- #---
#
apiVersion: v1 #apiVersion: v1
kind: Endpoints #kind: Endpoints
metadata: #metadata:
name: smokeping # name: smokeping
subsets: #subsets:
- addresses: #- addresses:
- ip: 192.168.20.200 # - ip: 192.168.20.200
ports: # ports:
- name: app # - name: app
port: 81 # port: 81
protocol: TCP # protocol: TCP
#
--- #---
#
apiVersion: traefik.containo.us/v1alpha1 #apiVersion: traefik.io/v1alpha1
kind: IngressRoute #kind: IngressRoute
metadata: #metadata:
name: smokeping-ingress # name: smokeping-ingress
spec: #spec:
entryPoints: # entryPoints:
- websecure # - websecure
routes: # routes:
- match: Host(`smokeping.durp.info`) && PathPrefix(`/`) # - match: Host(`smokeping.durp.info`) && PathPrefix(`/`)
middlewares: # middlewares:
- name: whitelist # - name: whitelist
namespace: traefik # namespace: traefik
- name: authentik-proxy-provider # - name: authentik-proxy-provider
namespace: traefik # namespace: traefik
kind: Rule # kind: Rule
services: # services:
- name: smokeping # - name: smokeping
port: 81 # port: 81
- match: Host(`smokeping.durp.info`) && PathPrefix(`/outpost.goauthentik.io`) # - match: Host(`smokeping.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule # kind: Rule
services: # services:
- name: ak-outpost-authentik-embedded-outpost # - name: ak-outpost-authentik-embedded-outpost
namespace: authentik # namespace: authentik
port: 9000 # port: 9000
tls: # tls:
secretName: smokeping-tls # secretName: smokeping-tls
#
--- #---
#
apiVersion: cert-manager.io/v1 #apiVersion: cert-manager.io/v1
kind: Certificate #kind: Certificate
metadata: #metadata:
name: smokeping-tls # name: smokeping-tls
spec: #spec:
secretName: smokeping-tls # secretName: smokeping-tls
issuerRef: # issuerRef:
name: letsencrypt-production # name: letsencrypt-production
kind: ClusterIssuer # kind: ClusterIssuer
commonName: "smokeping.durp.info" # commonName: "smokeping.durp.info"
dnsNames: # dnsNames:
- "smokeping.durp.info" # - "smokeping.durp.info"
#
--- #---
#
kind: Service #kind: Service
apiVersion: v1 #apiVersion: v1
metadata: #metadata:
name: smokeping-external-dns # name: smokeping-external-dns
annotations: # annotations:
external-dns.alpha.kubernetes.io/hostname: smokeping.durp.info # external-dns.alpha.kubernetes.io/hostname: smokeping.durp.info
spec: #spec:
type: ExternalName # type: ExternalName
externalName: durp.info # externalName: durp.info

View File

@@ -1,74 +1,74 @@
apiVersion: v1 #apiVersion: v1
kind: Service #kind: Service
metadata: #metadata:
name: speedtest # name: speedtest
spec: #spec:
ports: # ports:
- name: app # - name: app
port: 6580 # port: 6580
protocol: TCP # protocol: TCP
targetPort: 6580 # targetPort: 6580
clusterIP: None # clusterIP: None
type: ClusterIP # type: ClusterIP
#
--- #---
#
apiVersion: v1 #apiVersion: v1
kind: Endpoints #kind: Endpoints
metadata: #metadata:
name: speedtest # name: speedtest
subsets: #subsets:
- addresses: #- addresses:
- ip: 192.168.20.200 # - ip: 192.168.20.200
ports: # ports:
- name: app # - name: app
port: 6580 # port: 6580
protocol: TCP # protocol: TCP
#
--- #---
#
apiVersion: traefik.containo.us/v1alpha1 #apiVersion: traefik.io/v1alpha1
kind: IngressRoute #kind: IngressRoute
metadata: #metadata:
name: speedtest-ingress # name: speedtest-ingress
spec: #spec:
entryPoints: # entryPoints:
- websecure # - websecure
routes: # routes:
- match: Host(`speedtest.durp.info`) && PathPrefix(`/`) # - match: Host(`speedtest.durp.info`) && PathPrefix(`/`)
kind: Rule # kind: Rule
middlewares: # middlewares:
- name: authentik-proxy-provider # - name: authentik-proxy-provider
namespace: traefik # namespace: traefik
services: # services:
- name: speedtest # - name: speedtest
port: 6580 # port: 6580
tls: # tls:
secretName: speedtest-tls # secretName: speedtest-tls
#
--- #---
#
apiVersion: cert-manager.io/v1 #apiVersion: cert-manager.io/v1
kind: Certificate #kind: Certificate
metadata: #metadata:
name: speedtest-tls # name: speedtest-tls
spec: #spec:
secretName: speedtest-tls # secretName: speedtest-tls
issuerRef: # issuerRef:
name: letsencrypt-production # name: letsencrypt-production
kind: ClusterIssuer # kind: ClusterIssuer
commonName: "speedtest.durp.info" # commonName: "speedtest.durp.info"
dnsNames: # dnsNames:
- "speedtest.durp.info" # - "speedtest.durp.info"
#
--- #---
#
kind: Service #kind: Service
apiVersion: v1 #apiVersion: v1
metadata: #metadata:
name: speedtest-external-dns # name: speedtest-external-dns
annotations: # annotations:
external-dns.alpha.kubernetes.io/hostname: speedtest.durp.info # external-dns.alpha.kubernetes.io/hostname: speedtest.durp.info
spec: #spec:
type: ExternalName # type: ExternalName
externalName: durp.info # externalName: durp.info

View File

@@ -1,16 +1,16 @@
apiVersion: v1 #apiVersion: v1
kind: ConfigMap #kind: ConfigMap
metadata: #metadata:
name: traefik-configmap # name: traefik-configmap
data: #data:
config.yml: | # config.yml: |
http: # http:
routers: # routers:
router0: # router0:
service: service0 # service: service0
rule: Host(`testing.durp.info`) # rule: Host(`testing.durp.info`)
services: # services:
service0: # service0:
loadBalancer: # loadBalancer:
servers: # servers:
- url: https://192.168.20.130 # - url: https://192.168.20.130

View File

@@ -1,113 +1,34 @@
apiVersion: traefik.io/v1alpha1 #apiVersion: traefik.io/v1alpha1
kind: IngressRoute #kind: IngressRoute
metadata: #metadata:
name: traefik-ingress # name: traefik-ingress
spec: #spec:
entryPoints: # entryPoints:
- websecure # - websecure
routes: # routes:
- match: Host(`traefik.durp.info`) # - match: Host(`traefik.durp.info`)
kind: Rule # kind: Rule
services: # services:
- name: api@internal # - name: api@internal
kind: TraefikService # kind: TraefikService
tls: # tls:
secretName: traefik-tls # secretName: traefik-tls
#
--- #---
#
apiVersion: cert-manager.io/v1 #apiVersion: cert-manager.io/v1
kind: Certificate #kind: Certificate
metadata: #metadata:
name: traefik-tls # name: traefik-tls
namespace: traefik # namespace: traefik
spec: #spec:
secretName: traefik-tls # secretName: traefik-tls
issuerRef: # issuerRef:
name: letsencrypt-production # name: letsencrypt-production
kind: ClusterIssuer # kind: ClusterIssuer
commonName: "traefik.durp.info" # commonName: "traefik.durp.info"
dnsNames: # dnsNames:
- "traefik.durp.info" # - "traefik.durp.info"
#
--- #---
#
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: infra-cluster
port: 443
tls:
secretName: authentik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: authentik-tls
spec:
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
secretName: authentik-tls
commonName: "authentik.durp.info"
dnsNames:
- "authentik.durp.info"
---
apiVersion: v1
kind: Endpoints
metadata:
name: master-cluster
subsets:
- addresses:
- ip: 192.168.20.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: master-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
name: infra-cluster
subsets:
- addresses:
- ip: 192.168.12.130
ports:
- port: 443
---
apiVersion: v1
kind: Service
metadata:
name: infra-cluster
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443

View File

@@ -1,7 +1,7 @@
traefik: traefik:
image: image:
registry: registry.durp.info # registry: registry.durp.info
repository: traefik # repository: traefik
pullPolicy: Always pullPolicy: Always
providers: providers:
@@ -14,17 +14,17 @@ traefik:
replicas: 3 replicas: 3
revisionHistoryLimit: 1 revisionHistoryLimit: 1
volumes: # volumes:
- name: traefik-configmap # - name: traefik-configmap
mountPath: "/config" # mountPath: "/config"
type: configMap # type: configMap
ingressRoute: ingressRoute:
dashboard: dashboard:
enabled: true enabled: true
additionalArguments: additionalArguments:
- "--providers.file.filename=/config/config.yml" # - "--providers.file.filename=/config/config.yml"
- "--serversTransport.insecureSkipVerify=true" - "--serversTransport.insecureSkipVerify=true"
- "--log.level=DEBUG" - "--log.level=DEBUG"
- --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin - --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin

View File

@@ -39,9 +39,9 @@ spec:
namespace: traefik namespace: traefik
name: dmz name: dmz
syncPolicy: syncPolicy:
managedNamespaceMetadata: # managedNamespaceMetadata:
labels: # labels:
istio-injection: enabled # istio-injection: enabled
automated: automated:
prune: true prune: true
selfHeal: true selfHeal: true

View File

@@ -1,16 +1,12 @@
apiVersion: v2 apiVersion: v2
name: litellm-helm name: litellm
description: Call all LLM APIs using the OpenAI format description: A Helm chart for Kubernetes
type: application type: application
version: 0.4.1
appVersion: v1.50.2 version: 0.1.0
appVersion: "1.16.0"
dependencies: dependencies:
- name: "postgresql" - name: "litellm-helm"
version: ">=13.3.0" version: 0.1.636
repository: https://charts.bitnami.com/bitnami repository: oci://ghcr.io/berriai/litellm-helm
condition: db.deployStandalone
- name: redis
version: ">=18.0.0"
repository: oci://registry-1.docker.io/bitnamicharts
condition: redis.enabled

View File

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "litellm.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "litellm.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "litellm.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "litellm.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -1,84 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "litellm.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "litellm.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "litellm.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "litellm.labels" -}}
helm.sh/chart: {{ include "litellm.chart" . }}
{{ include "litellm.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "litellm.selectorLabels" -}}
app.kubernetes.io/name: {{ include "litellm.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "litellm.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "litellm.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Get redis service name
*/}}
{{- define "litellm.redis.serviceName" -}}
{{- if and (eq .Values.redis.architecture "standalone") .Values.redis.sentinel.enabled -}}
{{- printf "%s-%s" .Release.Name (default "redis" .Values.redis.nameOverride | trunc 63 | trimSuffix "-") -}}
{{- else -}}
{{- printf "%s-%s-master" .Release.Name (default "redis" .Values.redis.nameOverride | trunc 63 | trimSuffix "-") -}}
{{- end -}}
{{- end -}}
{{/*
Get redis service port
*/}}
{{- define "litellm.redis.port" -}}
{{- if .Values.redis.sentinel.enabled -}}
{{ .Values.redis.sentinel.service.ports.sentinel }}
{{- else -}}
{{ .Values.redis.master.service.ports.redis }}
{{- end -}}
{{- end -}}

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "litellm.fullname" . }}-config
data:
config.yaml: |
{{ .Values.proxy_config | toYaml | indent 6 }}

View File

@@ -1,182 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "litellm.fullname" . }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "litellm.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap-litellm.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "litellm.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "litellm.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ include "litellm.name" . }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "main-%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: HOST
value: "{{ .Values.listen | default "0.0.0.0" }}"
- name: PORT
value: {{ .Values.service.port | quote}}
{{- if .Values.db.deployStandalone }}
- name: DATABASE_USERNAME
valueFrom:
secretKeyRef:
name: {{ include "litellm.fullname" . }}-dbcredentials
key: username
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "litellm.fullname" . }}-dbcredentials
key: password
- name: DATABASE_HOST
value: {{ .Release.Name }}-postgresql
- name: DATABASE_NAME
value: litellm
{{- else if .Values.db.useExisting }}
- name: DATABASE_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.db.secret.name }}
key: {{ .Values.db.secret.usernameKey }}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.db.secret.name }}
key: {{ .Values.db.secret.passwordKey }}
- name: DATABASE_HOST
value: {{ .Values.db.endpoint }}
- name: DATABASE_NAME
value: {{ .Values.db.database }}
- name: DATABASE_URL
value: {{ .Values.db.url | quote }}
{{- end }}
- name: PROXY_MASTER_KEY
valueFrom:
secretKeyRef:
name: {{ include "litellm.fullname" . }}-masterkey
key: masterkey
{{- if .Values.redis.enabled }}
- name: REDIS_HOST
value: {{ include "litellm.redis.serviceName" . }}
- name: REDIS_PORT
value: {{ include "litellm.redis.port" . | quote }}
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "redis.secretName" .Subcharts.redis }}
key: {{include "redis.secretPasswordKey" .Subcharts.redis }}
{{- end }}
{{- if .Values.envVars }}
{{- range $key, $val := .Values.envVars }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- end }}
envFrom:
{{- range .Values.environmentSecrets }}
- secretRef:
name: {{ . }}
{{- end }}
{{- range .Values.environmentConfigMaps }}
- configMapRef:
name: {{ . }}
{{- end }}
args:
- --config
- /etc/litellm/config.yaml
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
httpGet:
path: /health/liveliness
port: http
readinessProbe:
httpGet:
path: /health/readiness
port: http
# Give the container time to start up. Up to 5 minutes (10 * 30 seconds)
startupProbe:
httpGet:
path: /health/readiness
port: http
failureThreshold: 30
periodSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: litellm-config
mountPath: /etc/litellm/
{{ if .Values.securityContext.readOnlyRootFilesystem }}
- name: tmp
mountPath: /tmp
- name: cache
mountPath: /.cache
- name: npm
mountPath: /.npm
{{- end }}
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.extraContainers }}
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
{{ if .Values.securityContext.readOnlyRootFilesystem }}
- name: tmp
emptyDir:
sizeLimit: 500Mi
- name: cache
emptyDir:
sizeLimit: 500Mi
- name: npm
emptyDir:
sizeLimit: 500Mi
{{- end }}
- name: litellm-config
configMap:
name: {{ include "litellm.fullname" . }}-config
items:
- key: "config.yaml"
path: "config.yaml"
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "litellm.fullname" . }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "litellm.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "litellm.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,70 +0,0 @@
{{- if .Values.migrationJob.enabled }}
# This job runs the prisma migrations for the LiteLLM DB.
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "litellm.fullname" . }}-migrations
annotations:
argocd.argoproj.io/hook: PreSync
argocd.argoproj.io/hook-delete-policy: BeforeHookCreation # delete old migration on a new deploy in case the migration needs to make updates
checksum/config: {{ toYaml .Values | sha256sum }}
spec:
template:
metadata:
annotations:
{{- with .Values.migrationJob.annotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
containers:
- name: prisma-migrations
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "main-%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
command: ["python", "litellm/proxy/prisma_migration.py"]
workingDir: "/app"
env:
{{- if .Values.db.useExisting }}
- name: DATABASE_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.db.secret.name }}
key: {{ .Values.db.secret.usernameKey }}
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.db.secret.name }}
key: {{ .Values.db.secret.passwordKey }}
- name: DATABASE_HOST
value: {{ .Values.db.endpoint }}
- name: DATABASE_NAME
value: {{ .Values.db.database }}
- name: DATABASE_URL
value: {{ .Values.db.url | quote }}
{{- else }}
- name: DATABASE_URL
value: postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ .Release.Name }}-postgresql/{{ .Values.postgresql.auth.database }}
{{- end }}
- name: DISABLE_SCHEMA_UPDATE
value: "false" # always run the migration from the Helm PreSync hook, override the value set
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
restartPolicy: OnFailure
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
ttlSecondsAfterFinished: {{ .Values.migrationJob.ttlSecondsAfterFinished }}
backoffLimit: {{ .Values.migrationJob.backoffLimit }}
{{- end }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.db.deployStandalone -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "litellm.fullname" . }}-dbcredentials
data:
# Password for the "postgres" user
postgres-password: {{ ( index .Values.postgresql.auth "postgres-password") | default "litellm" | b64enc }}
username: {{ .Values.postgresql.auth.username | default "litellm" | b64enc }}
password: {{ .Values.postgresql.auth.password | default "litellm" | b64enc }}
type: Opaque
{{- end -}}

View File

@@ -1,8 +0,0 @@
{{ $masterkey := (.Values.masterkey | default (randAlphaNum 17)) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "litellm.fullname" . }}-masterkey
data:
masterkey: {{ $masterkey | b64enc }}
type: Opaque

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "litellm.fullname" . }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "litellm.selectorLabels" . | nindent 4 }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "litellm.serviceAccountName" . }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -1,25 +0,0 @@
{{- /* "helm test" hook pod: polls the proxy Service's readiness endpoint.
Succeeds on the first HTTP fetch that works; total budget is roughly
10s initial wait + 30 attempts with a 5s wget timeout and 2s pause each,
after which the pod exits 1 and the test is reported failed. */}}
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "litellm.fullname" . }}-test-connection"
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['sh', '-c']
      args:
        - |
          # Wait for a bit to allow the service to be ready
          sleep 10
          # Try multiple times with a delay between attempts
          for i in $(seq 1 30); do
            wget -T 5 "{{ include "litellm.fullname" . }}:{{ .Values.service.port }}/health/readiness" && exit 0
            echo "Attempt $i failed, waiting..."
            sleep 2
          done
          exit 1
  restartPolicy: Never

View File

@@ -1,43 +0,0 @@
{{- /* "helm test" hook pod: verifies that the Datadog-related environment
variables rendered from .Values.envVars reach the container with the
expected literal values (DD_ENV=dev_helm, DD_SERVICE=litellm,
USE_DDTRACE=true). Exits non-zero on the first mismatch. */}}
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "litellm.fullname" . }}-env-test"
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: test
      image: busybox
      command: ['sh', '-c']
      args:
        - |
          # Test DD_ENV
          if [ "$DD_ENV" != "dev_helm" ]; then
            echo "❌ Environment variable DD_ENV mismatch. Expected: dev_helm, Got: $DD_ENV"
            exit 1
          fi
          echo "✅ Environment variable DD_ENV matches expected value: $DD_ENV"
          # Test DD_SERVICE
          if [ "$DD_SERVICE" != "litellm" ]; then
            echo "❌ Environment variable DD_SERVICE mismatch. Expected: litellm, Got: $DD_SERVICE"
            exit 1
          fi
          echo "✅ Environment variable DD_SERVICE matches expected value: $DD_SERVICE"
          # Test USE_DDTRACE
          if [ "$USE_DDTRACE" != "true" ]; then
            echo "❌ Environment variable USE_DDTRACE mismatch. Expected: true, Got: $USE_DDTRACE"
            exit 1
          fi
          echo "✅ Environment variable USE_DDTRACE matches expected value: $USE_DDTRACE"
      env:
        - name: DD_ENV
          value: {{ .Values.envVars.DD_ENV | quote }}
        - name: DD_SERVICE
          value: {{ .Values.envVars.DD_SERVICE | quote }}
        - name: USE_DDTRACE
          value: {{ .Values.envVars.USE_DDTRACE | quote }}
  restartPolicy: Never

View File

@@ -1,10 +1,11 @@
# Default values for litellm. litellm:
# This is a YAML-formatted file. # Default values for litellm.
# Declare variables to be passed into your templates. # This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1 replicaCount: 1
image: image:
# Use "ghcr.io/berriai/litellm-database" for optimized image with database # Use "ghcr.io/berriai/litellm-database" for optimized image with database
repository: ghcr.io/berriai/litellm-database repository: ghcr.io/berriai/litellm-database
pullPolicy: Always pullPolicy: Always
@@ -12,11 +13,11 @@ image:
# tag: "main-latest" # tag: "main-latest"
tag: "" tag: ""
imagePullSecrets: [] imagePullSecrets: []
nameOverride: "litellm" nameOverride: "litellm"
fullnameOverride: "" fullnameOverride: ""
serviceAccount: serviceAccount:
# Specifies whether a service account should be created # Specifies whether a service account should be created
create: false create: false
# Automatically mount a ServiceAccount's API credentials? # Automatically mount a ServiceAccount's API credentials?
@@ -27,13 +28,13 @@ serviceAccount:
# If not set and create is true, a name is generated using the fullname template # If not set and create is true, a name is generated using the fullname template
name: "" name: ""
podAnnotations: {} podAnnotations: {}
podLabels: {} podLabels: {}
# At the time of writing, the litellm docker image requires write access to the # At the time of writing, the litellm docker image requires write access to the
# filesystem on startup so that prisma can install some dependencies. # filesystem on startup so that prisma can install some dependencies.
podSecurityContext: {} podSecurityContext: {}
securityContext: {} securityContext: {}
# capabilities: # capabilities:
# drop: # drop:
# - ALL # - ALL
@@ -41,23 +42,23 @@ securityContext: {}
# runAsNonRoot: true # runAsNonRoot: true
# runAsUser: 1000 # runAsUser: 1000
# A list of Kubernetes Secret objects that will be exported to the LiteLLM proxy # A list of Kubernetes Secret objects that will be exported to the LiteLLM proxy
# pod as environment variables. These secrets can then be referenced in the # pod as environment variables. These secrets can then be referenced in the
# configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>` # configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
environmentSecrets: [] environmentSecrets: []
# - litellm-env-secret # - litellm-env-secret
# A list of Kubernetes ConfigMap objects that will be exported to the LiteLLM proxy # A list of Kubernetes ConfigMap objects that will be exported to the LiteLLM proxy
# pod as environment variables. The ConfigMap kv-pairs can then be referenced in the # pod as environment variables. The ConfigMap kv-pairs can then be referenced in the
# configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>` # configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
environmentConfigMaps: [] environmentConfigMaps: []
# - litellm-env-configmap # - litellm-env-configmap
service: service:
type: ClusterIP type: ClusterIP
port: 4000 port: 4000
ingress: ingress:
enabled: false enabled: false
className: "nginx" className: "nginx"
annotations: {} annotations: {}
@@ -73,12 +74,12 @@ ingress:
# hosts: # hosts:
# - chart-example.local # - chart-example.local
# masterkey: changeit # masterkey: changeit
# The elements within proxy_config are rendered as config.yaml for the proxy # The elements within proxy_config are rendered as config.yaml for the proxy
# Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml # Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml
# Reference: https://docs.litellm.ai/docs/proxy/configs # Reference: https://docs.litellm.ai/docs/proxy/configs
proxy_config: proxy_config:
model_list: model_list:
# At least one model must exist for the proxy to start. # At least one model must exist for the proxy to start.
- model_name: gpt-3.5-turbo - model_name: gpt-3.5-turbo
@@ -93,7 +94,7 @@ proxy_config:
general_settings: general_settings:
master_key: os.environ/PROXY_MASTER_KEY master_key: os.environ/PROXY_MASTER_KEY
resources: {} resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious # We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little # choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -105,33 +106,33 @@ resources: {}
# cpu: 100m # cpu: 100m
# memory: 128Mi # memory: 128Mi
autoscaling: autoscaling:
enabled: false enabled: false
minReplicas: 1 minReplicas: 1
maxReplicas: 100 maxReplicas: 100
targetCPUUtilizationPercentage: 80 targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80 # targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition. # Additional volumes on the output Deployment definition.
volumes: [] volumes: []
# - name: foo # - name: foo
# secret: # secret:
# secretName: mysecret # secretName: mysecret
# optional: false # optional: false
# Additional volumeMounts on the output Deployment definition. # Additional volumeMounts on the output Deployment definition.
volumeMounts: [] volumeMounts: []
# - name: foo # - name: foo
# mountPath: "/etc/foo" # mountPath: "/etc/foo"
# readOnly: true # readOnly: true
nodeSelector: {} nodeSelector: {}
tolerations: [] tolerations: []
affinity: {} affinity: {}
db: db:
# Use an existing postgres server/cluster # Use an existing postgres server/cluster
useExisting: false useExisting: false
@@ -154,9 +155,9 @@ db:
# instance. See the "postgresql" top level key for additional configuration. # instance. See the "postgresql" top level key for additional configuration.
deployStandalone: true deployStandalone: true
# Settings for Bitnami postgresql chart (if db.deployStandalone is true, ignored # Settings for Bitnami postgresql chart (if db.deployStandalone is true, ignored
# otherwise) # otherwise)
postgresql: postgresql:
architecture: standalone architecture: standalone
auth: auth:
username: litellm username: litellm
@@ -173,15 +174,15 @@ postgresql:
# secretKeys: # secretKeys:
# userPasswordKey: password # userPasswordKey: password
# requires cache: true in config file # requires cache: true in config file
# either enable this or pass a secret for REDIS_HOST, REDIS_PORT, REDIS_PASSWORD or REDIS_URL # either enable this or pass a secret for REDIS_HOST, REDIS_PORT, REDIS_PASSWORD or REDIS_URL
# with cache: true to use existing redis instance # with cache: true to use existing redis instance
redis: redis:
enabled: false enabled: false
architecture: standalone architecture: standalone
# Prisma migration job settings # Prisma migration job settings
migrationJob: migrationJob:
enabled: true # Enable or disable the schema migration Job enabled: true # Enable or disable the schema migration Job
retries: 3 # Number of retries for the Job in case of failure retries: 3 # Number of retries for the Job in case of failure
backoffLimit: 4 # Backoff limit for Job restarts backoffLimit: 4 # Backoff limit for Job restarts
@@ -189,8 +190,8 @@ migrationJob:
annotations: {} annotations: {}
ttlSecondsAfterFinished: 120 ttlSecondsAfterFinished: 120
# Additional environment variables to be added to the deployment # Additional environment variables to be added to the deployment
envVars: { envVars: {
# USE_DDTRACE: "true" # USE_DDTRACE: "true"
} }

View File

@@ -10,18 +10,18 @@ spec:
middlewares: middlewares:
- name: whitelist - name: whitelist
namespace: traefik namespace: traefik
- name: authentik-proxy-provider # - name: authentik-proxy-provider
namespace: traefik # namespace: traefik
kind: Rule kind: Rule
services: services:
- name: longhorn-frontend - name: longhorn-frontend
port: 80 port: 80
- match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`) # - match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule # kind: Rule
services: # services:
- name: ak-outpost-authentik-embedded-outpost # - name: ak-outpost-authentik-embedded-outpost
namespace: authentik # namespace: authentik
port: 9000 # port: 9000
tls: tls:
secretName: longhorn-tls secretName: longhorn-tls

View File

@@ -19,7 +19,7 @@ spec:
spec: spec:
containers: containers:
- name: app - name: app
image: registry.internal.durp.info/louislam/uptime-kuma:1 image: registry.durp.info/louislam/uptime-kuma:1
ports: ports:
- containerPort: 3001 - containerPort: 3001
volumeMounts: volumeMounts: