28 Commits

SHA1 Message Date
c6d963d635 update 2025-03-18 06:25:39 -05:00
399fadef3f update 2025-03-18 06:24:54 -05:00
9165bd44a9 update 2025-03-18 06:24:03 -05:00
093bf40127 update 2025-03-18 06:21:58 -05:00
d217345a25 update 2025-03-18 06:20:22 -05:00
3b69fdcf8f update 2025-03-18 06:19:13 -05:00
513298f3be update 2025-03-18 06:16:52 -05:00
4c4f67525d update 2025-03-18 06:15:33 -05:00
39fae09f58 update 2025-03-18 06:15:05 -05:00
22aad9eb54 update 2025-03-18 05:58:53 -05:00
2450688ab8 update 2025-03-18 05:56:39 -05:00
d73f626ae5 update 2025-03-18 05:55:10 -05:00
04e76a3e90 update 2025-03-18 05:06:45 -05:00
2e45f7b899 update 2025-03-17 06:17:20 -05:00
7f3961964d update 2025-03-17 05:35:46 -05:00
813e8a8eb3 update 2025-03-17 05:26:24 -05:00
b2a369b479 update 2025-03-17 05:24:13 -05:00
0907c0fdc8 update 2025-03-17 05:20:34 -05:00
a50b3ca1d4 update 2025-03-17 05:13:50 -05:00
9d45ebf641 update 2025-03-17 05:00:00 -05:00
961e332e71 update 2025-03-17 04:59:19 -05:00
8d63bc4ea9 update 2025-03-16 21:32:29 -05:00
fa64786dcf update 2025-03-16 21:29:37 -05:00
3590ea3840 update 2025-03-16 21:22:07 -05:00
ce6b64840e update 2025-03-16 21:15:28 -05:00
f68327747a update 2025-03-16 13:35:16 -05:00
2df7ebbf4c update 2025-03-16 13:33:00 -05:00
1c63f8028a update 2025-03-16 13:27:48 -05:00
34 changed files with 600 additions and 1135 deletions

View File

@@ -0,0 +1,42 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: authentik-ingress
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`authentik.durp.info`) && PathPrefix(`/`)
      kind: Rule
      services:
        - name: infra-cluster
          port: 443
  tls:
    secretName: authentik-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: authentik-tls
spec:
  issuerRef:
    name: letsencrypt-production
    kind: ClusterIssuer
  secretName: authentik-tls
  commonName: "authentik.durp.info"
  dnsNames:
    - "authentik.durp.info"
---
kind: Service
apiVersion: v1
metadata:
  name: authentik-external-dns
  annotations:
    external-dns.alpha.kubernetes.io/hostname: authentik.durp.info
spec:
  type: ExternalName
  externalName: durp.info
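
Note: the ExternalName Service above carries no traffic. It exists only so external-dns sees the external-dns.alpha.kubernetes.io/hostname annotation and publishes a DNS record for the host; Traefik does the actual routing. A minimal sketch of the pattern, with a hypothetical myapp.durp.info host:

kind: Service
apiVersion: v1
metadata:
  name: myapp-external-dns      # hypothetical name
  annotations:
    external-dns.alpha.kubernetes.io/hostname: myapp.durp.info
spec:
  type: ExternalName
  externalName: durp.info       # DNS target the published record points at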

View File

@@ -0,0 +1,45 @@
apiVersion: v1
kind: Endpoints
metadata:
  name: master-cluster
subsets:
  - addresses:
      - ip: 192.168.20.130
    ports:
      - port: 443
---
apiVersion: v1
kind: Service
metadata:
  name: master-cluster
spec:
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
  name: infra-cluster
subsets:
  - addresses:
      - ip: 192.168.12.130
    ports:
      - port: 443
---
apiVersion: v1
kind: Service
metadata:
  name: infra-cluster
spec:
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
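
Note: these Services deliberately omit a pod selector, so Kubernetes leaves their endpoints unmanaged and the hand-written Endpoints objects (which must share the Service's name) steer traffic to IPs outside the cluster. A minimal sketch of the pairing, with a hypothetical name and IP:

apiVersion: v1
kind: Service
metadata:
  name: external-backend        # hypothetical; no selector on purpose
spec:
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
  name: external-backend        # must match the Service name exactly
subsets:
  - addresses:
      - ip: 203.0.113.10        # hypothetical external IP
    ports:
      - port: 443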

View File

@@ -19,7 +19,7 @@ metadata:
  name: gitea
subsets:
  - addresses:
-      - ip: 192.168.20.200
+      - ip: 192.168.21.200
    ports:
      - name: app
        port: 3000
@@ -27,7 +27,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: gitea-ingress

View File

@@ -27,7 +27,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kasm-ingress

View File

@@ -27,7 +27,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: nexus-ingress

View File

@@ -11,12 +11,12 @@ spec:
  data:
    - secretKey: users
      remoteRef:
-        key: secrets/internalproxy/ollama
+        key: kv/ollama
        property: users
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: ollama-basic-auth
@@ -56,7 +56,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: ollama-ingress

View File

@@ -0,0 +1,30 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: open-webui-ingress
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`open-webui.durp.info`) && PathPrefix(`/`)
      kind: Rule
      services:
        - name: master-cluster
          port: 443
  tls:
    secretName: open-webui-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: open-webui-tls
spec:
  secretName: open-webui-tls
  issuerRef:
    name: letsencrypt-production
    kind: ClusterIssuer
  commonName: "open-webui.durp.info"
  dnsNames:
    - "open-webui.durp.info"

View File

@@ -27,7 +27,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: plex-ingress

View File

@@ -1,68 +1,68 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: redlib
-spec:
-  ports:
-    - name: app
-      port: 8082
-      protocol: TCP
-      targetPort: 8082
-  clusterIP: None
-  type: ClusterIP
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: redlib
-subsets:
-  - addresses:
-      - ip: 192.168.20.200
-    ports:
-      - name: app
-        port: 8082
-        protocol: TCP
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: redlib-ingress
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`redlib.durp.info`) && PathPrefix(`/`)
-      middlewares:
-        - name: authentik-proxy-provider
-          namespace: traefik
-      kind: Rule
-      services:
-        - name: redlib
-          port: 8082
-    - match: Host(`redlib.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
-      kind: Rule
-      services:
-        - name: ak-outpost-authentik-embedded-outpost
-          namespace: authentik
-          port: 9000
-  tls:
-    secretName: redlib-tls
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: redlib-tls
-spec:
-  secretName: redlib-tls
-  issuerRef:
-    name: letsencrypt-production
-    kind: ClusterIssuer
-  commonName: "redlib.durp.info"
-  dnsNames:
-    - "redlib.durp.info"
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: redlib
+#spec:
+#  ports:
+#    - name: app
+#      port: 8082
+#      protocol: TCP
+#      targetPort: 8082
+#  clusterIP: None
+#  type: ClusterIP
+#
+#---
+#
+#apiVersion: v1
+#kind: Endpoints
+#metadata:
+#  name: redlib
+#subsets:
+#  - addresses:
+#      - ip: 192.168.20.200
+#    ports:
+#      - name: app
+#        port: 8082
+#        protocol: TCP
+#
+#---
+#
+#apiVersion: traefik.io/v1alpha1
+#kind: IngressRoute
+#metadata:
+#  name: redlib-ingress
+#spec:
+#  entryPoints:
+#    - websecure
+#  routes:
+#    - match: Host(`redlib.durp.info`) && PathPrefix(`/`)
+#      middlewares:
+#        - name: authentik-proxy-provider
+#          namespace: traefik
+#      kind: Rule
+#      services:
+#        - name: redlib
+#          port: 8082
+#    - match: Host(`redlib.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
+#      kind: Rule
+#      services:
+#        - name: ak-outpost-authentik-embedded-outpost
+#          namespace: authentik
+#          port: 9000
+#  tls:
+#    secretName: redlib-tls
+#
+#---
+#
+#apiVersion: cert-manager.io/v1
+#kind: Certificate
+#metadata:
+#  name: redlib-tls
+#spec:
+#  secretName: redlib-tls
+#  issuerRef:
+#    name: letsencrypt-production
+#    kind: ClusterIssuer
+#  commonName: "redlib.durp.info"
+#  dnsNames:
+#    - "redlib.durp.info"

View File

@@ -27,7 +27,7 @@ subsets:
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: registry-ingress

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vault

View File

@@ -1,82 +1,82 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: smokeping
-spec:
-  ports:
-    - name: app
-      port: 81
-      protocol: TCP
-      targetPort: 81
-  clusterIP: None
-  type: ClusterIP
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: smokeping
-subsets:
-  - addresses:
-      - ip: 192.168.20.200
-    ports:
-      - name: app
-        port: 81
-        protocol: TCP
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: smokeping-ingress
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`smokeping.durp.info`) && PathPrefix(`/`)
-      middlewares:
-        - name: whitelist
-          namespace: traefik
-        - name: authentik-proxy-provider
-          namespace: traefik
-      kind: Rule
-      services:
-        - name: smokeping
-          port: 81
-    - match: Host(`smokeping.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
-      kind: Rule
-      services:
-        - name: ak-outpost-authentik-embedded-outpost
-          namespace: authentik
-          port: 9000
-  tls:
-    secretName: smokeping-tls
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: smokeping-tls
-spec:
-  secretName: smokeping-tls
-  issuerRef:
-    name: letsencrypt-production
-    kind: ClusterIssuer
-  commonName: "smokeping.durp.info"
-  dnsNames:
-    - "smokeping.durp.info"
----
-kind: Service
-apiVersion: v1
-metadata:
-  name: smokeping-external-dns
-  annotations:
-    external-dns.alpha.kubernetes.io/hostname: smokeping.durp.info
-spec:
-  type: ExternalName
-  externalName: durp.info
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: smokeping
+#spec:
+#  ports:
+#    - name: app
+#      port: 81
+#      protocol: TCP
+#      targetPort: 81
+#  clusterIP: None
+#  type: ClusterIP
+#
+#---
+#
+#apiVersion: v1
+#kind: Endpoints
+#metadata:
+#  name: smokeping
+#subsets:
+#  - addresses:
+#      - ip: 192.168.20.200
+#    ports:
+#      - name: app
+#        port: 81
+#        protocol: TCP
+#
+#---
+#
+#apiVersion: traefik.io/v1alpha1
+#kind: IngressRoute
+#metadata:
+#  name: smokeping-ingress
+#spec:
+#  entryPoints:
+#    - websecure
+#  routes:
+#    - match: Host(`smokeping.durp.info`) && PathPrefix(`/`)
+#      middlewares:
+#        - name: whitelist
+#          namespace: traefik
+#        - name: authentik-proxy-provider
+#          namespace: traefik
+#      kind: Rule
+#      services:
+#        - name: smokeping
+#          port: 81
+#    - match: Host(`smokeping.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
+#      kind: Rule
+#      services:
+#        - name: ak-outpost-authentik-embedded-outpost
+#          namespace: authentik
+#          port: 9000
+#  tls:
+#    secretName: smokeping-tls
+#
+#---
+#
+#apiVersion: cert-manager.io/v1
+#kind: Certificate
+#metadata:
+#  name: smokeping-tls
+#spec:
+#  secretName: smokeping-tls
+#  issuerRef:
+#    name: letsencrypt-production
+#    kind: ClusterIssuer
+#  commonName: "smokeping.durp.info"
+#  dnsNames:
+#    - "smokeping.durp.info"
+#
+#---
+#
+#kind: Service
+#apiVersion: v1
+#metadata:
+#  name: smokeping-external-dns
+#  annotations:
+#    external-dns.alpha.kubernetes.io/hostname: smokeping.durp.info
+#spec:
+#  type: ExternalName
+#  externalName: durp.info

View File

@@ -1,74 +1,74 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: speedtest
-spec:
-  ports:
-    - name: app
-      port: 6580
-      protocol: TCP
-      targetPort: 6580
-  clusterIP: None
-  type: ClusterIP
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: speedtest
-subsets:
-  - addresses:
-      - ip: 192.168.20.200
-    ports:
-      - name: app
-        port: 6580
-        protocol: TCP
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: speedtest-ingress
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`speedtest.durp.info`) && PathPrefix(`/`)
-      kind: Rule
-      middlewares:
-        - name: authentik-proxy-provider
-          namespace: traefik
-      services:
-        - name: speedtest
-          port: 6580
-  tls:
-    secretName: speedtest-tls
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: speedtest-tls
-spec:
-  secretName: speedtest-tls
-  issuerRef:
-    name: letsencrypt-production
-    kind: ClusterIssuer
-  commonName: "speedtest.durp.info"
-  dnsNames:
-    - "speedtest.durp.info"
----
-kind: Service
-apiVersion: v1
-metadata:
-  name: speedtest-external-dns
-  annotations:
-    external-dns.alpha.kubernetes.io/hostname: speedtest.durp.info
-spec:
-  type: ExternalName
-  externalName: durp.info
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: speedtest
+#spec:
+#  ports:
+#    - name: app
+#      port: 6580
+#      protocol: TCP
+#      targetPort: 6580
+#  clusterIP: None
+#  type: ClusterIP
+#
+#---
+#
+#apiVersion: v1
+#kind: Endpoints
+#metadata:
+#  name: speedtest
+#subsets:
+#  - addresses:
+#      - ip: 192.168.20.200
+#    ports:
+#      - name: app
+#        port: 6580
+#        protocol: TCP
+#
+#---
+#
+#apiVersion: traefik.io/v1alpha1
+#kind: IngressRoute
+#metadata:
+#  name: speedtest-ingress
+#spec:
+#  entryPoints:
+#    - websecure
+#  routes:
+#    - match: Host(`speedtest.durp.info`) && PathPrefix(`/`)
+#      kind: Rule
+#      middlewares:
+#        - name: authentik-proxy-provider
+#          namespace: traefik
+#      services:
+#        - name: speedtest
+#          port: 6580
+#  tls:
+#    secretName: speedtest-tls
+#
+#---
+#
+#apiVersion: cert-manager.io/v1
+#kind: Certificate
+#metadata:
+#  name: speedtest-tls
+#spec:
+#  secretName: speedtest-tls
+#  issuerRef:
+#    name: letsencrypt-production
+#    kind: ClusterIssuer
+#  commonName: "speedtest.durp.info"
+#  dnsNames:
+#    - "speedtest.durp.info"
+#
+#---
+#
+#kind: Service
+#apiVersion: v1
+#metadata:
+#  name: speedtest-external-dns
+#  annotations:
+#    external-dns.alpha.kubernetes.io/hostname: speedtest.durp.info
+#spec:
+#  type: ExternalName
+#  externalName: durp.info

View File

@@ -1,16 +1,16 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: traefik-configmap
-data:
-  config.yml: |
-    http:
-      routers:
-        router0:
-          service: service0
-          rule: Host(`testing.durp.info`)
-      services:
-        service0:
-          loadBalancer:
-            servers:
-              - url: https://192.168.20.130
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: traefik-configmap
+#data:
+#  config.yml: |
+#    http:
+#      routers:
+#        router0:
+#          service: service0
+#          rule: Host(`testing.durp.info`)
+#      services:
+#        service0:
+#          loadBalancer:
+#            servers:
+#              - url: https://192.168.20.130
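
Note: this ConfigMap was consumed by Traefik's file provider; the Helm values later in this diff mounted it at /config and pointed --providers.file.filename at it, and both halves are commented out together in this change. A sketch of how the two pieces connect, using the keys from that values file:

traefik:
  volumes:
    - name: traefik-configmap           # the ConfigMap above
      mountPath: "/config"
      type: configMap
  additionalArguments:
    - "--providers.file.filename=/config/config.yml"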

View File

@@ -1,113 +1,34 @@
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
-  name: traefik-ingress
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`traefik.durp.info`)
-      kind: Rule
-      services:
-        - name: api@internal
-          kind: TraefikService
-  tls:
-    secretName: traefik-tls
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: traefik-tls
-  namespace: traefik
-spec:
-  secretName: traefik-tls
-  issuerRef:
-    name: letsencrypt-production
-    kind: ClusterIssuer
-  commonName: "traefik.durp.info"
-  dnsNames:
-    - "traefik.durp.info"
----
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
-  name: authentik-ingress
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`authentik.durp.info`) && PathPrefix(`/`)
-      kind: Rule
-      services:
-        - name: infra-cluster
-          port: 443
-  tls:
-    secretName: authentik-tls
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: authentik-tls
-spec:
-  issuerRef:
-    name: letsencrypt-production
-    kind: ClusterIssuer
-  secretName: authentik-tls
-  commonName: "authentik.durp.info"
-  dnsNames:
-    - "authentik.durp.info"
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: master-cluster
-subsets:
-  - addresses:
-      - ip: 192.168.20.130
-    ports:
-      - port: 443
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: master-cluster
-spec:
-  ports:
-    - protocol: TCP
-      port: 443
-      targetPort: 443
----
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: infra-cluster
-subsets:
-  - addresses:
-      - ip: 192.168.12.130
-    ports:
-      - port: 443
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: infra-cluster
-spec:
-  ports:
-    - protocol: TCP
-      port: 443
-      targetPort: 443
+#apiVersion: traefik.io/v1alpha1
+#kind: IngressRoute
+#metadata:
+#  name: traefik-ingress
+#spec:
+#  entryPoints:
+#    - websecure
+#  routes:
+#    - match: Host(`traefik.durp.info`)
+#      kind: Rule
+#      services:
+#        - name: api@internal
+#          kind: TraefikService
+#  tls:
+#    secretName: traefik-tls
+#
+#---
+#
+#apiVersion: cert-manager.io/v1
+#kind: Certificate
+#metadata:
+#  name: traefik-tls
+#  namespace: traefik
+#spec:
+#  secretName: traefik-tls
+#  issuerRef:
+#    name: letsencrypt-production
+#    kind: ClusterIssuer
+#  commonName: "traefik.durp.info"
+#  dnsNames:
+#    - "traefik.durp.info"
+#
+#---
+#

View File

@@ -1,7 +1,7 @@
traefik:
  image:
-    registry: registry.durp.info
-    repository: traefik
+    # registry: registry.durp.info
+    # repository: traefik
    pullPolicy: Always
  providers:
@@ -14,17 +14,17 @@ traefik:
  replicas: 3
  revisionHistoryLimit: 1
-  volumes:
-    - name: traefik-configmap
-      mountPath: "/config"
-      type: configMap
+  # volumes:
+  #   - name: traefik-configmap
+  #     mountPath: "/config"
+  #     type: configMap
  ingressRoute:
    dashboard:
      enabled: true
  additionalArguments:
-    - "--providers.file.filename=/config/config.yml"
+    # - "--providers.file.filename=/config/config.yml"
    - "--serversTransport.insecureSkipVerify=true"
    - "--log.level=DEBUG"
    - --experimental.plugins.jwt.moduleName=github.com/traefik-plugins/traefik-jwt-plugin
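
Note: --serversTransport.insecureSkipVerify=true disables upstream TLS verification globally, which is what lets Traefik forward to the clusters' self-signed 443 backends. A hedged sketch of a narrower alternative using Traefik's ServersTransport CRD, which a service can reference individually (the resource name here is hypothetical):

apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
  name: skip-verify               # hypothetical
spec:
  insecureSkipVerify: true        # applies only where this transport is referenced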

View File

@@ -39,9 +39,9 @@ spec:
    namespace: traefik
    name: dmz
  syncPolicy:
-    managedNamespaceMetadata:
-      labels:
-        istio-injection: enabled
+    # managedNamespaceMetadata:
+    #   labels:
+    #     istio-injection: enabled
    automated:
      prune: true
      selfHeal: true

View File

@@ -1,16 +1,12 @@
apiVersion: v2
-name: litellm-helm
-description: Call all LLM APIs using the OpenAI format
+name: litellm
+description: A Helm chart for Kubernetes
type: application
-version: 0.4.1
-appVersion: v1.50.2
+version: 0.1.0
+appVersion: "1.16.0"
dependencies:
-  - name: "postgresql"
-    version: ">=13.3.0"
-    repository: https://charts.bitnami.com/bitnami
-    condition: db.deployStandalone
-  - name: redis
-    version: ">=18.0.0"
-    repository: oci://registry-1.docker.io/bitnamicharts
-    condition: redis.enabled
+  - name: "litellm-helm"
+    version: 0.1.636
+    repository: oci://ghcr.io/berriai/litellm-helm
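
Note: the chart is now a thin wrapper around the upstream litellm-helm OCI chart, which is why the vendored templates below are deleted. With a wrapper chart, values intended for a dependency nest under that dependency's name (or alias) in values.yaml; a sketch assuming the subchart is exposed under the litellm key, as the rewritten values.yaml later in this diff suggests:

litellm:                          # key the subchart reads its values from
  replicaCount: 1
  service:
    type: ClusterIP
    port: 4000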

View File

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "litellm.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "litellm.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "litellm.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "litellm.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -1,84 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "litellm.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "litellm.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "litellm.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "litellm.labels" -}}
helm.sh/chart: {{ include "litellm.chart" . }}
{{ include "litellm.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "litellm.selectorLabels" -}}
app.kubernetes.io/name: {{ include "litellm.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "litellm.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "litellm.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Get redis service name
*/}}
{{- define "litellm.redis.serviceName" -}}
{{- if and (eq .Values.redis.architecture "standalone") .Values.redis.sentinel.enabled -}}
{{- printf "%s-%s" .Release.Name (default "redis" .Values.redis.nameOverride | trunc 63 | trimSuffix "-") -}}
{{- else -}}
{{- printf "%s-%s-master" .Release.Name (default "redis" .Values.redis.nameOverride | trunc 63 | trimSuffix "-") -}}
{{- end -}}
{{- end -}}
{{/*
Get redis service port
*/}}
{{- define "litellm.redis.port" -}}
{{- if .Values.redis.sentinel.enabled -}}
{{ .Values.redis.sentinel.service.ports.sentinel }}
{{- else -}}
{{ .Values.redis.master.service.ports.redis }}
{{- end -}}
{{- end -}}

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "litellm.fullname" . }}-config
data:
  config.yaml: |
{{ .Values.proxy_config | toYaml | indent 6 }}

View File

@@ -1,182 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "litellm.fullname" . }}
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "litellm.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/configmap-litellm.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "litellm.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "litellm.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ include "litellm.name" . }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "main-%s" .Chart.AppVersion) }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: HOST
              value: "{{ .Values.listen | default "0.0.0.0" }}"
            - name: PORT
              value: {{ .Values.service.port | quote }}
            {{- if .Values.db.deployStandalone }}
            - name: DATABASE_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ include "litellm.fullname" . }}-dbcredentials
                  key: username
            - name: DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "litellm.fullname" . }}-dbcredentials
                  key: password
            - name: DATABASE_HOST
              value: {{ .Release.Name }}-postgresql
            - name: DATABASE_NAME
              value: litellm
            {{- else if .Values.db.useExisting }}
            - name: DATABASE_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.db.secret.name }}
                  key: {{ .Values.db.secret.usernameKey }}
            - name: DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.db.secret.name }}
                  key: {{ .Values.db.secret.passwordKey }}
            - name: DATABASE_HOST
              value: {{ .Values.db.endpoint }}
            - name: DATABASE_NAME
              value: {{ .Values.db.database }}
            - name: DATABASE_URL
              value: {{ .Values.db.url | quote }}
            {{- end }}
            - name: PROXY_MASTER_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "litellm.fullname" . }}-masterkey
                  key: masterkey
            {{- if .Values.redis.enabled }}
            - name: REDIS_HOST
              value: {{ include "litellm.redis.serviceName" . }}
            - name: REDIS_PORT
              value: {{ include "litellm.redis.port" . | quote }}
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "redis.secretName" .Subcharts.redis }}
                  key: {{ include "redis.secretPasswordKey" .Subcharts.redis }}
            {{- end }}
            {{- if .Values.envVars }}
            {{- range $key, $val := .Values.envVars }}
            - name: {{ $key }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          envFrom:
            {{- range .Values.environmentSecrets }}
            - secretRef:
                name: {{ . }}
            {{- end }}
            {{- range .Values.environmentConfigMaps }}
            - configMapRef:
                name: {{ . }}
            {{- end }}
          args:
            - --config
            - /etc/litellm/config.yaml
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /health/liveliness
              port: http
          readinessProbe:
            httpGet:
              path: /health/readiness
              port: http
          # Give the container time to start up. Up to 5 minutes (10 * 30 seconds)
          startupProbe:
            httpGet:
              path: /health/readiness
              port: http
            failureThreshold: 30
            periodSeconds: 10
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - name: litellm-config
              mountPath: /etc/litellm/
            {{ if .Values.securityContext.readOnlyRootFilesystem }}
            - name: tmp
              mountPath: /tmp
            - name: cache
              mountPath: /.cache
            - name: npm
              mountPath: /.npm
            {{- end }}
            {{- with .Values.volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
        {{- with .Values.extraContainers }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      volumes:
        {{ if .Values.securityContext.readOnlyRootFilesystem }}
        - name: tmp
          emptyDir:
            sizeLimit: 500Mi
        - name: cache
          emptyDir:
            sizeLimit: 500Mi
        - name: npm
          emptyDir:
            sizeLimit: 500Mi
        {{- end }}
        - name: litellm-config
          configMap:
            name: {{ include "litellm.fullname" . }}-config
            items:
              - key: "config.yaml"
                path: "config.yaml"
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "litellm.fullname" . }}
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "litellm.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
    {{- end }}
{{- end }}

View File

@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "litellm.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@@ -1,70 +0,0 @@
{{- if .Values.migrationJob.enabled }}
# This job runs the prisma migrations for the LiteLLM DB.
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "litellm.fullname" . }}-migrations
  annotations:
    argocd.argoproj.io/hook: PreSync
    argocd.argoproj.io/hook-delete-policy: BeforeHookCreation # delete old migration on a new deploy in case the migration needs to make updates
    checksum/config: {{ toYaml .Values | sha256sum }}
spec:
  template:
    metadata:
      annotations:
        {{- with .Values.migrationJob.annotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      containers:
        - name: prisma-migrations
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "main-%s" .Chart.AppVersion) }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          command: ["python", "litellm/proxy/prisma_migration.py"]
          workingDir: "/app"
          env:
            {{- if .Values.db.useExisting }}
            - name: DATABASE_USERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.db.secret.name }}
                  key: {{ .Values.db.secret.usernameKey }}
            - name: DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.db.secret.name }}
                  key: {{ .Values.db.secret.passwordKey }}
            - name: DATABASE_HOST
              value: {{ .Values.db.endpoint }}
            - name: DATABASE_NAME
              value: {{ .Values.db.database }}
            - name: DATABASE_URL
              value: {{ .Values.db.url | quote }}
            {{- else }}
            - name: DATABASE_URL
              value: postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ .Release.Name }}-postgresql/{{ .Values.postgresql.auth.database }}
            {{- end }}
            - name: DISABLE_SCHEMA_UPDATE
              value: "false" # always run the migration from the Helm PreSync hook, override the value set
          {{- with .Values.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- with .Values.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      restartPolicy: OnFailure
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
  ttlSecondsAfterFinished: {{ .Values.migrationJob.ttlSecondsAfterFinished }}
  backoffLimit: {{ .Values.migrationJob.backoffLimit }}
{{- end }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.db.deployStandalone -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "litellm.fullname" . }}-dbcredentials
data:
  # Password for the "postgres" user
  postgres-password: {{ ( index .Values.postgresql.auth "postgres-password") | default "litellm" | b64enc }}
  username: {{ .Values.postgresql.auth.username | default "litellm" | b64enc }}
  password: {{ .Values.postgresql.auth.password | default "litellm" | b64enc }}
type: Opaque
{{- end -}}

View File

@@ -1,8 +0,0 @@
{{ $masterkey := (.Values.masterkey | default (randAlphaNum 17)) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "litellm.fullname" . }}-masterkey
data:
  masterkey: {{ $masterkey | b64enc }}
type: Opaque

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "litellm.fullname" . }}
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "litellm.selectorLabels" . | nindent 4 }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "litellm.serviceAccountName" . }}
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -1,25 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "litellm.fullname" . }}-test-connection"
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['sh', '-c']
      args:
        - |
          # Wait for a bit to allow the service to be ready
          sleep 10
          # Try multiple times with a delay between attempts
          for i in $(seq 1 30); do
            wget -T 5 "{{ include "litellm.fullname" . }}:{{ .Values.service.port }}/health/readiness" && exit 0
            echo "Attempt $i failed, waiting..."
            sleep 2
          done
          exit 1
  restartPolicy: Never

View File

@@ -1,43 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "litellm.fullname" . }}-env-test"
  labels:
    {{- include "litellm.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: test
      image: busybox
      command: ['sh', '-c']
      args:
        - |
          # Test DD_ENV
          if [ "$DD_ENV" != "dev_helm" ]; then
            echo "❌ Environment variable DD_ENV mismatch. Expected: dev_helm, Got: $DD_ENV"
            exit 1
          fi
          echo "✅ Environment variable DD_ENV matches expected value: $DD_ENV"
          # Test DD_SERVICE
          if [ "$DD_SERVICE" != "litellm" ]; then
            echo "❌ Environment variable DD_SERVICE mismatch. Expected: litellm, Got: $DD_SERVICE"
            exit 1
          fi
          echo "✅ Environment variable DD_SERVICE matches expected value: $DD_SERVICE"
          # Test USE_DDTRACE
          if [ "$USE_DDTRACE" != "true" ]; then
            echo "❌ Environment variable USE_DDTRACE mismatch. Expected: true, Got: $USE_DDTRACE"
            exit 1
          fi
          echo "✅ Environment variable USE_DDTRACE matches expected value: $USE_DDTRACE"
      env:
        - name: DD_ENV
          value: {{ .Values.envVars.DD_ENV | quote }}
        - name: DD_SERVICE
          value: {{ .Values.envVars.DD_SERVICE | quote }}
        - name: USE_DDTRACE
          value: {{ .Values.envVars.USE_DDTRACE | quote }}
  restartPolicy: Never

View File

@@ -1,196 +1,197 @@
-# Default values for litellm.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-replicaCount: 1
-image:
-  # Use "ghcr.io/berriai/litellm-database" for optimized image with database
-  repository: ghcr.io/berriai/litellm-database
-  pullPolicy: Always
-  # Overrides the image tag whose default is the chart appVersion.
-  # tag: "main-latest"
-  tag: ""
-imagePullSecrets: []
-nameOverride: "litellm"
-fullnameOverride: ""
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: false
-  # Automatically mount a ServiceAccount's API credentials?
-  automount: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-podAnnotations: {}
-podLabels: {}
-# At the time of writing, the litellm docker image requires write access to the
-# filesystem on startup so that prisma can install some dependencies.
-podSecurityContext: {}
-securityContext: {}
-  # capabilities:
-  #   drop:
-  #     - ALL
-  # readOnlyRootFilesystem: false
-  # runAsNonRoot: true
-  # runAsUser: 1000
-# A list of Kubernetes Secret objects that will be exported to the LiteLLM proxy
-# pod as environment variables. These secrets can then be referenced in the
-# configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
-environmentSecrets: []
-#   - litellm-env-secret
-# A list of Kubernetes ConfigMap objects that will be exported to the LiteLLM proxy
-# pod as environment variables. The ConfigMap kv-pairs can then be referenced in the
-# configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
-environmentConfigMaps: []
-#   - litellm-env-configmap
-service:
-  type: ClusterIP
-  port: 4000
-ingress:
-  enabled: false
-  className: "nginx"
-  annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
-  hosts:
-    - host: api.example.local
-      paths:
-        - path: /
-          pathType: ImplementationSpecific
-  tls: []
-  #  - secretName: chart-example-tls
-  #    hosts:
-  #      - chart-example.local
-# masterkey: changeit
-# The elements within proxy_config are rendered as config.yaml for the proxy
-# Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml
-# Reference: https://docs.litellm.ai/docs/proxy/configs
-proxy_config:
-  model_list:
-    # At least one model must exist for the proxy to start.
-    - model_name: gpt-3.5-turbo
-      litellm_params:
-        model: gpt-3.5-turbo
-        api_key: eXaMpLeOnLy
-    - model_name: fake-openai-endpoint
-      litellm_params:
-        model: openai/fake
-        api_key: fake-key
-        api_base: https://exampleopenaiendpoint-production.up.railway.app/
-  general_settings:
-    master_key: os.environ/PROXY_MASTER_KEY
-resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-# Additional volumes on the output Deployment definition.
-volumes: []
-# - name: foo
-#   secret:
-#     secretName: mysecret
-#     optional: false
-# Additional volumeMounts on the output Deployment definition.
-volumeMounts: []
-# - name: foo
-#   mountPath: "/etc/foo"
-#   readOnly: true
-nodeSelector: {}
-tolerations: []
-affinity: {}
-db:
-  # Use an existing postgres server/cluster
-  useExisting: false
-  # How to connect to the existing postgres server/cluster
-  endpoint: localhost
-  database: litellm
-  url: postgresql://$(DATABASE_USERNAME):$(DATABASE_PASSWORD)@$(DATABASE_HOST)/$(DATABASE_NAME)
-  secret:
-    name: postgres
-    usernameKey: username
-    passwordKey: password
-  # Use the Stackgres Helm chart to deploy an instance of a Stackgres cluster.
-  # The Stackgres Operator must already be installed within the target
-  # Kubernetes cluster.
-  # TODO: Stackgres deployment currently unsupported
-  useStackgresOperator: false
-  # Use the Postgres Helm chart to create a single node, stand alone postgres
-  # instance. See the "postgresql" top level key for additional configuration.
-  deployStandalone: true
-# Settings for Bitnami postgresql chart (if db.deployStandalone is true, ignored
-# otherwise)
-postgresql:
-  architecture: standalone
-  auth:
-    username: litellm
-    # You should override these on the helm command line with
-    # `--set postgresql.auth.postgres-password=<some good password>,postgresql.auth.password=<some good password>`
-    password: NoTaGrEaTpAsSwOrD
-    postgres-password: NoTaGrEaTpAsSwOrD
-    # A secret is created by this chart (litellm-helm) with the credentials that
-    # the new Postgres instance should use.
-    # existingSecret: ""
-    # secretKeys:
-    #   userPasswordKey: password
-# requires cache: true in config file
-# either enable this or pass a secret for REDIS_HOST, REDIS_PORT, REDIS_PASSWORD or REDIS_URL
-# with cache: true to use existing redis instance
-redis:
-  enabled: false
-  architecture: standalone
-# Prisma migration job settings
-migrationJob:
-  enabled: true # Enable or disable the schema migration Job
-  retries: 3 # Number of retries for the Job in case of failure
-  backoffLimit: 4 # Backoff limit for Job restarts
-  disableSchemaUpdate: false # Skip schema migrations for specific environments. When True, the job will exit with code 0.
-  annotations: {}
-  ttlSecondsAfterFinished: 120
-# Additional environment variables to be added to the deployment
-envVars: {
-  # USE_DDTRACE: "true"
-}
+litellm:
+  # Default values for litellm.
+  # This is a YAML-formatted file.
+  # Declare variables to be passed into your templates.
+  replicaCount: 1
+  image:
+    # Use "ghcr.io/berriai/litellm-database" for optimized image with database
+    repository: ghcr.io/berriai/litellm-database
+    pullPolicy: Always
+    # Overrides the image tag whose default is the chart appVersion.
+    # tag: "main-latest"
+    tag: ""
+  imagePullSecrets: []
+  nameOverride: "litellm"
+  fullnameOverride: ""
+  serviceAccount:
+    # Specifies whether a service account should be created
+    create: false
+    # Automatically mount a ServiceAccount's API credentials?
+    automount: true
+    # Annotations to add to the service account
+    annotations: {}
+    # The name of the service account to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+  podAnnotations: {}
+  podLabels: {}
+  # At the time of writing, the litellm docker image requires write access to the
+  # filesystem on startup so that prisma can install some dependencies.
+  podSecurityContext: {}
+  securityContext: {}
+    # capabilities:
+    #   drop:
+    #     - ALL
+    # readOnlyRootFilesystem: false
+    # runAsNonRoot: true
+    # runAsUser: 1000
+  # A list of Kubernetes Secret objects that will be exported to the LiteLLM proxy
+  # pod as environment variables. These secrets can then be referenced in the
+  # configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
+  environmentSecrets: []
+  #   - litellm-env-secret
+  # A list of Kubernetes ConfigMap objects that will be exported to the LiteLLM proxy
+  # pod as environment variables. The ConfigMap kv-pairs can then be referenced in the
+  # configuration file (or "litellm" ConfigMap) with `os.environ/<Env Var Name>`
+  environmentConfigMaps: []
+  #   - litellm-env-configmap
+  service:
+    type: ClusterIP
+    port: 4000
+  ingress:
+    enabled: false
+    className: "nginx"
+    annotations: {}
+      # kubernetes.io/ingress.class: nginx
+      # kubernetes.io/tls-acme: "true"
+    hosts:
+      - host: api.example.local
+        paths:
+          - path: /
+            pathType: ImplementationSpecific
+    tls: []
+    #  - secretName: chart-example-tls
+    #    hosts:
+    #      - chart-example.local
+  # masterkey: changeit
+  # The elements within proxy_config are rendered as config.yaml for the proxy
+  # Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml
+  # Reference: https://docs.litellm.ai/docs/proxy/configs
+  proxy_config:
+    model_list:
+      # At least one model must exist for the proxy to start.
+      - model_name: gpt-3.5-turbo
+        litellm_params:
+          model: gpt-3.5-turbo
+          api_key: eXaMpLeOnLy
+      - model_name: fake-openai-endpoint
+        litellm_params:
+          model: openai/fake
+          api_key: fake-key
+          api_base: https://exampleopenaiendpoint-production.up.railway.app/
+    general_settings:
+      master_key: os.environ/PROXY_MASTER_KEY
+  resources: {}
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+  autoscaling:
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 100
+    targetCPUUtilizationPercentage: 80
+    # targetMemoryUtilizationPercentage: 80
+  # Additional volumes on the output Deployment definition.
+  volumes: []
+  # - name: foo
+  #   secret:
+  #     secretName: mysecret
+  #     optional: false
+  # Additional volumeMounts on the output Deployment definition.
+  volumeMounts: []
+  # - name: foo
+  #   mountPath: "/etc/foo"
+  #   readOnly: true
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  db:
+    # Use an existing postgres server/cluster
+    useExisting: false
+    # How to connect to the existing postgres server/cluster
+    endpoint: localhost
+    database: litellm
+    url: postgresql://$(DATABASE_USERNAME):$(DATABASE_PASSWORD)@$(DATABASE_HOST)/$(DATABASE_NAME)
+    secret:
+      name: postgres
+      usernameKey: username
+      passwordKey: password
+    # Use the Stackgres Helm chart to deploy an instance of a Stackgres cluster.
+    # The Stackgres Operator must already be installed within the target
+    # Kubernetes cluster.
+    # TODO: Stackgres deployment currently unsupported
+    useStackgresOperator: false
+    # Use the Postgres Helm chart to create a single node, stand alone postgres
+    # instance. See the "postgresql" top level key for additional configuration.
+    deployStandalone: true
+  # Settings for Bitnami postgresql chart (if db.deployStandalone is true, ignored
+  # otherwise)
+  postgresql:
+    architecture: standalone
+    auth:
+      username: litellm
+      database: litellm
+      # You should override these on the helm command line with
+      # `--set postgresql.auth.postgres-password=<some good password>,postgresql.auth.password=<some good password>`
+      password: NoTaGrEaTpAsSwOrD
+      postgres-password: NoTaGrEaTpAsSwOrD
+      # A secret is created by this chart (litellm-helm) with the credentials that
+      # the new Postgres instance should use.
+      # existingSecret: ""
+      # secretKeys:
+      #   userPasswordKey: password
+  # requires cache: true in config file
+  # either enable this or pass a secret for REDIS_HOST, REDIS_PORT, REDIS_PASSWORD or REDIS_URL
+  # with cache: true to use existing redis instance
+  redis:
+    enabled: false
+    architecture: standalone
+  # Prisma migration job settings
+  migrationJob:
+    enabled: true # Enable or disable the schema migration Job
+    retries: 3 # Number of retries for the Job in case of failure
+    backoffLimit: 4 # Backoff limit for Job restarts
+    disableSchemaUpdate: false # Skip schema migrations for specific environments. When True, the job will exit with code 0.
+    annotations: {}
+    ttlSecondsAfterFinished: 120
+  # Additional environment variables to be added to the deployment
+  envVars: {
+    # USE_DDTRACE: "true"
+  }

View File

@@ -10,18 +10,18 @@ spec:
      middlewares:
        - name: whitelist
          namespace: traefik
-        - name: authentik-proxy-provider
-          namespace: traefik
+        # - name: authentik-proxy-provider
+        #   namespace: traefik
      kind: Rule
      services:
        - name: longhorn-frontend
          port: 80
-    - match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
-      kind: Rule
-      services:
-        - name: ak-outpost-authentik-embedded-outpost
-          namespace: authentik
-          port: 9000
+    # - match: Host(`longhorn.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
+    #   kind: Rule
+    #   services:
+    #     - name: ak-outpost-authentik-embedded-outpost
+    #       namespace: authentik
+    #       port: 9000
  tls:
    secretName: longhorn-tls
View File

@@ -19,7 +19,7 @@ spec:
    spec:
      containers:
        - name: app
-         image: registry.internal.durp.info/louislam/uptime-kuma:1
+         image: registry.durp.info/louislam/uptime-kuma:1
          ports:
            - containerPort: 3001
          volumeMounts: