remove vault

2022-11-19 09:37:03 -06:00
parent d6a50926bd
commit f585ee0f6e
5 changed files with 0 additions and 996 deletions

@@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vault
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/DeveloperDurp/homelab.git
    targetRevision: main
    path: vault
  destination:
    namespace: vault
    name: in-cluster
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
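# With the automated syncPolicy above, Argo CD keeps the cluster reconciled to
# the vault/ path of the repo: prune removes resources that disappear from git,
# selfHeal reverts manual drift, and CreateNamespace=true creates the vault
# namespace on the first sync.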

@@ -1,11 +0,0 @@
apiVersion: v2
name: vault
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
  - name: vault
    repository: https://helm.releases.hashicorp.com/
    version: 0.22.1

@@ -1,24 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    pv.kubernetes.io/provisioned-by: durp.info/nfs
  finalizers:
    - kubernetes.io/pv-protection
  name: vault-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 20Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: vault-pvc
    namespace: vault-repository-manager
  nfs:
    path: /mnt/user/k3s/vault
    server: 192.168.20.253
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-storage
  volumeMode: Filesystem
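# Note: claimRef.namespace above reads vault-repository-manager, while the
# vault-pvc claim below lives in the vault namespace, so this pre-bind as
# written would not have matched that claim.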

@@ -1,18 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app.kubernetes.io/component: app
    app.kubernetes.io/instance: vault
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: vault
    helm.sh/chart: vault-2.14.4
  name: vault-pvc
  namespace: vault
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: nfs-storage

@@ -1,922 +0,0 @@
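# The upstream HashiCorp chart is pulled in as a dependency of the local
# wrapper chart (see Chart.yaml above), so every upstream value below is
# nested under the top-level vault: key.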
vault:
  global:
    enabled: true
    imagePullSecrets: []
    tlsDisable: true
    externalVaultAddr: ""
    openshift: false
    serverTelemetry:
      prometheusOperator: false

  injector:
    enabled: "-"
    replicas: 1
    port: 8080
    leaderElector:
      enabled: true
    metrics:
      enabled: false
    externalVaultAddr: ""
    image:
      repository: "hashicorp/vault-k8s"
      pullPolicy: Always
    agentImage:
      repository: "hashicorp/vault"
    agentDefaults:
      cpuRequest: "250m"
      memLimit: "128Mi"
      memRequest: "64Mi"
      template: "map"
      templateConfig:
        exitOnRetryFailure: true
        staticSecretRenderInterval: ""
    authPath: "auth/kubernetes"
    logLevel: "info"
    logFormat: "standard"
    revokeOnShutdown: false
    webhook:
      failurePolicy: Ignore
      matchPolicy: Exact
      timeoutSeconds: 30
      namespaceSelector: {}
      objectSelector: |
        matchExpressions:
        - key: app.kubernetes.io/name
          operator: NotIn
          values:
          - {{ template "vault.name" . }}-agent-injector
      annotations: {}
    failurePolicy: Ignore
    namespaceSelector: {}
    objectSelector: {}
    webhookAnnotations: {}
    certs:
      secretName: null
      caBundle: ""
      certName: tls.crt
      keyName: tls.key
    securityContext:
      pod: {}
      container: {}
    resources:
      requests:
        memory: 256Mi
        cpu: 250m
      limits:
        memory: 256Mi
    extraEnvironmentVars: {}
    affinity: |
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector
                app.kubernetes.io/instance: "{{ .Release.Name }}"
                component: webhook
            topologyKey: kubernetes.io/hostname
    topologySpreadConstraints: []
    tolerations: []
    nodeSelector: {}
    priorityClassName: ""
    annotations: {}
    extraLabels: {}
    hostNetwork: false
    service:
      annotations: {}
    serviceAccount:
      annotations: {}
    podDisruptionBudget: {}
    strategy: {}
    # strategy: |
    #   rollingUpdate:
    #     maxSurge: 25%
    #     maxUnavailable: 25%
    #   type: RollingUpdate

  server:
    # If true, or "-" with global.enabled true, Vault server will be installed.
    # See vault.mode in _helpers.tpl for implementation details.
    enabled: "-"

    # [Enterprise Only] This value refers to a Kubernetes secret that you have
    # created that contains your enterprise license. If you are not using an
    # enterprise image or if you plan to introduce the license key via another
    # route, then leave secretName blank ("") or set it to null.
    # Requires Vault Enterprise 1.8 or later.
    enterpriseLicense:
      # The name of the Kubernetes secret that holds the enterprise license. The
      # secret must be in the same namespace that Vault is installed into.
      secretName: ""
      # The key within the Kubernetes secret that holds the enterprise license.
      secretKey: "license"
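      # An illustrative license Secret matching the two fields above (the name
      # and namespace here are hypothetical, not defaults of this chart):
      #   apiVersion: v1
      #   kind: Secret
      #   metadata:
      #     name: vault-license
      #     namespace: vault
      #   stringData:
      #     license: <raw license text>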
    image:
      repository: "hashicorp/vault"
      tag: "1.12.0"
      # Overrides the default Image Pull Policy
      pullPolicy: Always

    # Configure the Update Strategy Type for the StatefulSet
    # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
    updateStrategyType: "OnDelete"

    # Configure the logging verbosity for the Vault server.
    # Supported log levels include: trace, debug, info, warn, error
    logLevel: ""

    # Configure the logging format for the Vault server.
    # Supported log formats include: standard, json
    logFormat: ""

    # Resource requests, limits, etc. for the server cluster placement. This
    # should map directly to the value of the resources field for a PodSpec.
    # By default no direct resource request is made.
    resources: {}
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 250m
    #   limits:
    #     memory: 256Mi
    #     cpu: 250m

    # Ingress allows ingress services to be created to allow external access
    # from Kubernetes to access Vault pods.
    # If deployment is on OpenShift, the following block is ignored.
    # In order to expose the service, use the route section below.
    ingress:
      enabled: true
      labels: {}
      # traffic: external
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-production
      ingressClassName: "nginx"
      # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured.
      # The default value below should be sufficient in most cases.
      # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values.
      pathType: Prefix
      # When HA mode is enabled and K8s service registration is being used,
      # configure the ingress to point to the Vault active service.
      activeService: true
      hosts:
        - host: vault.durp.info
          paths: []
      ## Extra paths to prepend to the host configuration. This is useful when working with annotation based services.
      extraPaths: []
      # - path: /*
      #   backend:
      #     service:
      #       name: ssl-redirect
      #       port:
      #         number: use-annotation
      tls:
        - hosts:
            - vault.durp.info
          secretName: vault-tls
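      # With the cert-manager.io/cluster-issuer annotation above, cert-manager's
      # ingress-shim requests a certificate for vault.durp.info and stores it in
      # the vault-tls Secret referenced here (assuming a letsencrypt-production
      # ClusterIssuer exists in the cluster).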
    route:
      enabled: false
      # When HA mode is enabled and K8s service registration is being used,
      # configure the route to point to the Vault active service.
      activeService: true
      labels: {}
      annotations: {}
      host: chart-example.local
      # tls will be passed directly to the route's TLS config, which
      # can be used to configure other termination methods that terminate
      # TLS at the router
      tls:
        termination: passthrough

    # authDelegator enables a cluster role binding to be attached to the service
    # account. This cluster role binding can be used to set up the Kubernetes
    # auth method. https://www.vaultproject.io/docs/auth/kubernetes.html
    authDelegator:
      enabled: true
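    # When enabled, the chart binds Vault's service account to the built-in
    # system:auth-delegator ClusterRole, letting Vault call the TokenReview API
    # to validate the service account tokens presented during Kubernetes auth
    # logins (e.g. after running `vault auth enable kubernetes`).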
    # extraInitContainers is a list of init containers. Specified as a YAML list.
    # This is useful if you need to run a script to provision TLS certificates or
    # write out configuration files in a dynamic way.
    extraInitContainers: null
    # # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder,
    # # which is defined in the volumes value.
    # - name: oauthapp
    #   image: "alpine"
    #   command: [sh, -c]
    #   args:
    #     - cd /tmp &&
    #       wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
    #       tar -xf oauthapp.xz &&
    #       mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
    #       chmod +x /usr/local/libexec/vault/oauthapp
    #   volumeMounts:
    #     - name: plugins
    #       mountPath: /usr/local/libexec/vault

    # extraContainers is a list of sidecar containers. Specified as a YAML list.
    extraContainers: null

    # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers.
    # This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation.
    shareProcessNamespace: false

    # extraArgs is a string containing additional Vault server arguments.
    extraArgs: ""

    # Used to define custom readinessProbe settings
    readinessProbe:
      enabled: true
      # If you need to use an http path instead of the default exec:
      # path: /v1/sys/health?standbyok=true
      # When a probe fails, Kubernetes will try failureThreshold times before giving up
      failureThreshold: 2
      # Number of seconds after the container has started before probe initiates
      initialDelaySeconds: 5
      # How often (in seconds) to perform the probe
      periodSeconds: 5
      # Minimum consecutive successes for the probe to be considered successful after having failed
      successThreshold: 1
      # Number of seconds after which the probe times out.
      timeoutSeconds: 3
    # Used to enable a livenessProbe for the pods
    livenessProbe:
      enabled: false
      path: "/v1/sys/health?standbyok=true"
      # When a probe fails, Kubernetes will try failureThreshold times before giving up
      failureThreshold: 2
      # Number of seconds after the container has started before probe initiates
      initialDelaySeconds: 60
      # How often (in seconds) to perform the probe
      periodSeconds: 5
      # Minimum consecutive successes for the probe to be considered successful after having failed
      successThreshold: 1
      # Number of seconds after which the probe times out.
      timeoutSeconds: 3

    # Optional duration in seconds the pod needs to terminate gracefully.
    # See: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
    terminationGracePeriodSeconds: 10

    # Used to set the sleep time during the preStop step
    preStopSleepSeconds: 5

    # Used to define commands to run after the pod is ready.
    # This can be used to automate processes such as initialization
    # or bootstrapping auth methods.
    postStart: []
    # - /bin/sh
    # - -c
    # - /vault/userconfig/myscript/run.sh

    # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
    # used to include variables required for auto-unseal.
    extraEnvironmentVars: {}
    # GOOGLE_REGION: global
    # GOOGLE_PROJECT: myproject
    # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json

    # extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
    # These variables take value from existing Secret objects.
    extraSecretEnvironmentVars: []
    # - envName: AWS_SECRET_ACCESS_KEY
    #   secretName: vault
    #   secretKey: AWS_SECRET_ACCESS_KEY

    # Deprecated: please use 'volumes' instead.
    # extraVolumes is a list of extra volumes to mount. These will be exposed
    # to Vault in the path `/vault/userconfig/<name>/`. The value below is
    # an array of objects, examples are shown below.
    extraVolumes: []
    # - type: secret (or "configMap")
    #   name: my-secret
    #   path: null # default is `/vault/userconfig`

    # volumes is a list of volumes made available to all containers. These are rendered
    # via toYaml rather than pre-processed like the extraVolumes value.
    # The purpose is to make it easy to share volumes between containers.
    volumes: null
    # - name: plugins
    #   emptyDir: {}

    # volumeMounts is a list of volumeMounts for the main server container. These are rendered
    # via toYaml rather than pre-processed like the extraVolumes value.
    # The purpose is to make it easy to share volumes between containers.
    volumeMounts: null
    # - mountPath: /usr/local/libexec/vault
    #   name: plugins
    #   readOnly: true

    # Affinity Settings
    # Commenting out or emptying the affinity variable allows deployment to
    # single-node clusters such as Minikube.
    # This should be either a multi-line string or YAML matching the PodSpec's affinity field.
    affinity: |
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app.kubernetes.io/name: {{ template "vault.name" . }}
                app.kubernetes.io/instance: "{{ .Release.Name }}"
                component: server
            topologyKey: kubernetes.io/hostname

    # Topology settings for server pods
    # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
    # This should be either a multi-line string or YAML matching the topologySpreadConstraints array
    # in a PodSpec.
    topologySpreadConstraints: []

    # Toleration Settings for server pods
    # This should be either a multi-line string or YAML matching the Toleration array
    # in a PodSpec.
    tolerations: []

    # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map.
    # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
    # Example:
    # nodeSelector:
    #   beta.kubernetes.io/arch: amd64
    nodeSelector: {}

    # Enables network policy for server pods
    networkPolicy:
      enabled: false
      egress: []
      # egress:
      # - to:
      #   - ipBlock:
      #       cidr: 10.0.0.0/24
      #   ports:
      #   - protocol: TCP
      #     port: 443

    # Priority class for server pods
    priorityClassName: ""

    # Extra labels to attach to the server pods
    # This should be a YAML map of the labels to apply to the server pods
    extraLabels: {}

    # Extra annotations to attach to the server pods
    # This can either be YAML or a YAML-formatted multi-line templated string map
    # of the annotations to apply to the server pods
    annotations: {}

    # Enables a headless service to be used by the Vault StatefulSet
    service:
      enabled: true
      # clusterIP controls whether a Cluster IP address is attached to the
      # Vault service within Kubernetes. By default, the Vault service will
      # be given a Cluster IP address; set to None to disable. When disabled,
      # Kubernetes will create a "headless" service. Headless services can be
      # used to communicate with pods directly through DNS instead of a round-robin
      # load balancer.
      # clusterIP: None
      # Configures the service type for the main Vault service. Can be ClusterIP
      # or NodePort.
      # type: ClusterIP
      # Do not wait for pods to be ready
      publishNotReadyAddresses: true
      # The externalTrafficPolicy can be set to either Cluster or Local
      # and is only valid for LoadBalancer and NodePort service types.
      # The default value is Cluster.
      # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
      externalTrafficPolicy: Cluster
      # If type is set to "NodePort", a specific nodePort value can be configured;
      # it will be random if left blank.
      # nodePort: 30000
      # When HA mode is enabled:
      # If type is set to "NodePort", a specific nodePort value can be configured;
      # it will be random if left blank.
      # activeNodePort: 30001
      # When HA mode is enabled:
      # If type is set to "NodePort", a specific nodePort value can be configured;
      # it will be random if left blank.
      # standbyNodePort: 30002
      # Port on which Vault server is listening
      port: 8200
      # Target port to which the service should be mapped
      targetPort: 8200
      # Extra annotations for the service definition. This can either be YAML or a
      # YAML-formatted multi-line templated string map of the annotations to apply
      # to the service.
      annotations: {}

    # This configures the Vault StatefulSet to create a PVC for data
    # storage when using the file or raft backend storage engines.
    # See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
    dataStorage:
      enabled: true
      existingClaim: vault-pvc
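      # Intended to reuse the pre-created vault-pvc from this repo (backed by
      # the NFS vault-pv above) rather than letting the StatefulSet provision
      # its own volume.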
    # This configures the Vault StatefulSet to create a PVC for audit
    # logs. Once Vault is deployed, initialized, and unsealed, Vault must
    # be configured to use this for audit logs. This will be mounted to
    # /vault/audit
    # See https://www.vaultproject.io/docs/audit/index.html to know more
    auditStorage:
      enabled: false
      # Size of the PVC created
      size: 10Gi
      # Location where the PVC will be mounted.
      mountPath: "/vault/audit"
      # Name of the storage class to use. If null it will use the
      # configured default Storage Class.
      storageClass: null
      # Access Mode of the storage device being used for the PVC
      accessMode: ReadWriteOnce
      # Annotations to apply to the PVC
      annotations: {}

    # Run Vault in "dev" mode. This requires no further setup, no state management,
    # and no initialization. This is useful for experimenting with Vault without
    # needing to unseal it or store keys. All data is lost on restart - do not
    # use dev mode for anything other than experimenting.
    # See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
    dev:
      enabled: false
      # Set VAULT_DEV_ROOT_TOKEN_ID value
      devRootToken: "root"

    # Run Vault in "standalone" mode. This is the default mode that will deploy if
    # no arguments are given to helm. This requires a PVC for data storage to use
    # the "file" backend. This mode is not highly available and should not be scaled
    # past a single replica.
    standalone:
      enabled: "-"

      # config is a raw string of default configuration when using a Stateful
      # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
      # and store data there. This is only used when using a Replica count of 1, and
      # using a stateful set. This should be HCL.

      # Note: Configuration files are stored in ConfigMaps so sensitive data
      # such as passwords should be either mounted through extraSecretEnvironmentVars
      # or through a Kube secret. For more information see:
      # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
      config: |
        ui = true

        listener "tcp" {
          tls_disable = 1
          address = "[::]:8200"
          cluster_address = "[::]:8201"
          # Enable unauthenticated metrics access (necessary for Prometheus Operator)
          #telemetry {
          #  unauthenticated_metrics_access = "true"
          #}
        }
        storage "file" {
          path = "/vault/data"
        }

        # Example configuration for using auto-unseal, using Google Cloud KMS. The
        # GKMS keys must already exist, and the cluster must have a service account
        # that is authorized to access GCP KMS.
        #seal "gcpckms" {
        #  project    = "vault-helm-dev"
        #  region     = "global"
        #  key_ring   = "vault-helm-unseal-kr"
        #  crypto_key = "vault-helm-unseal-key"
        #}

        # Example configuration for enabling Prometheus metrics in your config.
        #telemetry {
        #  prometheus_retention_time = "30s",
        #  disable_hostname = true
        #}

    # Run Vault in "HA" mode. There are no storage requirements unless the audit log
    # persistence is required. In HA mode Vault will configure itself to use Consul
    # for its storage backend. The default configuration provided will work with the
    # Consul Helm project out of the box. It is possible to manually configure Vault
    # to use a different HA backend.
    ha:
      enabled: false
      replicas: 3

      # Set the api_addr configuration for Vault HA
      # See https://www.vaultproject.io/docs/configuration#api_addr
      # If set to null, this will be set to the Pod IP Address
      apiAddr: null

      # Set the cluster_addr configuration for Vault HA
      # See https://www.vaultproject.io/docs/configuration#cluster_addr
      # If set to null, this will be set to https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201
      clusterAddr: null

      # Enables Vault's integrated Raft storage. Unlike the typical HA modes where
      # Vault's persistence is external (such as Consul), enabling Raft mode will create
      # persistent volumes for Vault to store data according to the configuration under server.dataStorage.
      # The Vault cluster will coordinate leader elections and failovers internally.
      raft:
        # Enables Raft integrated storage
        enabled: false
        # Set the Node Raft ID to the name of the pod
        setNodeId: false

        # Note: Configuration files are stored in ConfigMaps so sensitive data
        # such as passwords should be either mounted through extraSecretEnvironmentVars
        # or through a Kube secret. For more information see:
        # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
        config: |
          ui = true

          listener "tcp" {
            tls_disable = 1
            address = "[::]:8200"
            cluster_address = "[::]:8201"
            # Enable unauthenticated metrics access (necessary for Prometheus Operator)
            #telemetry {
            #  unauthenticated_metrics_access = "true"
            #}
          }

          storage "raft" {
            path = "/vault/data"
          }

          service_registration "kubernetes" {}

      # config is a raw string of default configuration when using a Stateful
      # deployment. Default is to use Consul for its HA storage backend.
      # This should be HCL.

      # Note: Configuration files are stored in ConfigMaps so sensitive data
      # such as passwords should be either mounted through extraSecretEnvironmentVars
      # or through a Kube secret. For more information see:
      # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
      config: |
        ui = true

        listener "tcp" {
          tls_disable = 1
          address = "[::]:8200"
          cluster_address = "[::]:8201"
        }

        storage "consul" {
          path = "vault"
          address = "HOST_IP:8500"
        }

        service_registration "kubernetes" {}

        # Example configuration for using auto-unseal, using Google Cloud KMS. The
        # GKMS keys must already exist, and the cluster must have a service account
        # that is authorized to access GCP KMS.
        #seal "gcpckms" {
        #  project    = "vault-helm-dev-246514"
        #  region     = "global"
        #  key_ring   = "vault-helm-unseal-kr"
        #  crypto_key = "vault-helm-unseal-key"
        #}

        # Example configuration for enabling Prometheus metrics.
        # If you are using Prometheus Operator you can enable a ServiceMonitor resource below.
        # You may wish to enable unauthenticated metrics in the listener block above.
        #telemetry {
        #  prometheus_retention_time = "30s",
        #  disable_hostname = true
        #}

      # A disruption budget limits the number of pods of a replicated application
      # that are down simultaneously from voluntary disruptions
      disruptionBudget:
        enabled: true
        # maxUnavailable will default to (n/2)-1 where n is the number of
        # replicas. If you'd like a custom value, you can specify an override here.
        maxUnavailable: null

    # Definition of the serviceAccount used to run Vault.
    # These options are also used when using an external Vault server to validate
    # Kubernetes tokens.
    serviceAccount:
      # Specifies whether a service account should be created
      create: true
      # The name of the service account to use.
      # If not set and create is true, a name is generated using the fullname template
      name: ""
      # Extra annotations for the serviceAccount definition. This can either be
      # YAML or a YAML-formatted multi-line templated string map of the
      # annotations to apply to the serviceAccount.
      annotations: {}
      # Extra labels to attach to the serviceAccount
      # This should be a YAML map of the labels to apply to the serviceAccount
      extraLabels: {}

    # Settings for the statefulSet used to run Vault.
    statefulSet:
      # Extra annotations for the statefulSet. This can either be YAML or a
      # YAML-formatted multi-line templated string map of the annotations to apply
      # to the statefulSet.
      annotations: {}

      # Set the pod and container security contexts.
      # If not set, these default to the following when *not* on OpenShift:
      #   pod:
      #     runAsNonRoot: true
      #     runAsGroup: {{ .Values.server.gid | default 1000 }}
      #     runAsUser: {{ .Values.server.uid | default 100 }}
      #     fsGroup: {{ .Values.server.gid | default 1000 }}
      #   container:
      #     allowPrivilegeEscalation: false
      #
      # And to the following on OpenShift:
      #   pod: {}
      #   container: {}
      securityContext:
        pod: {}
        container: {}

    # Should the server pods run on the host network
    hostNetwork: false

  # Vault UI
  ui:
    # True if you want to create a Service entry for the Vault UI.
    #
    # serviceType can be used to control the type of service created. For
    # example, setting this to "LoadBalancer" will create an external load
    # balancer (for supported K8S installations) to access the UI.
    enabled: false
    publishNotReadyAddresses: true
    # The service should only contain selectors for the active Vault pod
    activeVaultPodOnly: false
    serviceType: "ClusterIP"
    serviceNodePort: null
    externalPort: 8200
    targetPort: 8200
    # The externalTrafficPolicy can be set to either Cluster or Local
    # and is only valid for LoadBalancer and NodePort service types.
    # The default value is Cluster.
    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
    externalTrafficPolicy: Cluster
    # loadBalancerSourceRanges:
    #   - 10.0.0.0/16
    #   - 1.78.23.3/32
    # loadBalancerIP:
    # Extra annotations to attach to the ui service
    # This can either be YAML or a YAML-formatted multi-line templated string map
    # of the annotations to apply to the ui service
    annotations: {}

  # secrets-store-csi-driver-provider-vault
  csi:
    # True if you want to install a secrets-store-csi-driver-provider-vault daemonset.
    #
    # Requires installing the secrets-store-csi-driver separately, see:
    # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver
    #
    # With the driver and provider installed, you can mount Vault secrets into volumes
    # similar to the Vault Agent injector, and you can also sync those secrets into
    # Kubernetes secrets.
    enabled: false
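    # A minimal SecretProviderClass consuming this provider might look like the
    # sketch below (the name, role, and secret path are illustrative, not part
    # of this chart):
    #   apiVersion: secrets-store.csi.x-k8s.io/v1
    #   kind: SecretProviderClass
    #   metadata:
    #     name: vault-example
    #   spec:
    #     provider: vault
    #     parameters:
    #       roleName: "example-role"
    #       objects: |
    #         - objectName: "db-password"
    #           secretPath: "secret/data/example/db"
    #           secretKey: "password"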
    image:
      repository: "hashicorp/vault-csi-provider"
      tag: "1.2.0"
      pullPolicy: IfNotPresent

    # volumes is a list of volumes made available to all containers. These are rendered
    # via toYaml rather than pre-processed like the extraVolumes value.
    # The purpose is to make it easy to share volumes between containers.
    volumes: null
    # - name: tls
    #   secret:
    #     secretName: vault-tls

    # volumeMounts is a list of volumeMounts for the main server container. These are rendered
    # via toYaml rather than pre-processed like the extraVolumes value.
    # The purpose is to make it easy to share volumes between containers.
    volumeMounts: null
    # - name: tls
    #   mountPath: "/vault/tls"
    #   readOnly: true

    resources: {}
    # resources:
    #   requests:
    #     cpu: 50m
    #     memory: 128Mi
    #   limits:
    #     cpu: 50m
    #     memory: 128Mi

    # Settings for the daemonSet used to run the provider.
    daemonSet:
      updateStrategy:
        type: RollingUpdate
        maxUnavailable: ""
      # Extra annotations for the daemonSet. This can either be YAML or a
      # YAML-formatted multi-line templated string map of the annotations to apply
      # to the daemonSet.
      annotations: {}
      # Provider host path (must match the CSI provider's path)
      providersDir: "/etc/kubernetes/secrets-store-csi-providers"
      # Kubelet host path
      kubeletRootDir: "/var/lib/kubelet"
      # Extra labels to attach to the vault-csi-provider daemonSet
      # This should be a YAML map of the labels to apply to the csi provider daemonSet
      extraLabels: {}
      # Security context for the pod template and container in the csi provider daemonSet
      securityContext:
        pod: {}
        container: {}

    pod:
      # Extra annotations for the provider pods. This can either be YAML or a
      # YAML-formatted multi-line templated string map of the annotations to apply
      # to the pod.
      annotations: {}
      # Toleration Settings for provider pods
      # This should be either a multi-line string or YAML matching the Toleration array
      # in a PodSpec.
      tolerations: []
      # Extra labels to attach to the vault-csi-provider pod
      # This should be a YAML map of the labels to apply to the csi provider pod
      extraLabels: {}

    # Priority class for csi pods
    priorityClassName: ""

    serviceAccount:
      # Extra annotations for the serviceAccount definition. This can either be
      # YAML or a YAML-formatted multi-line templated string map of the
      # annotations to apply to the serviceAccount.
      annotations: {}
      # Extra labels to attach to the vault-csi-provider serviceAccount
      # This should be a YAML map of the labels to apply to the csi provider serviceAccount
      extraLabels: {}

    # Used to configure readinessProbe for the pods.
    readinessProbe:
      # When a probe fails, Kubernetes will try failureThreshold times before giving up
      failureThreshold: 2
      # Number of seconds after the container has started before probe initiates
      initialDelaySeconds: 5
      # How often (in seconds) to perform the probe
      periodSeconds: 5
      # Minimum consecutive successes for the probe to be considered successful after having failed
      successThreshold: 1
      # Number of seconds after which the probe times out.
      timeoutSeconds: 3

    # Used to configure livenessProbe for the pods.
    livenessProbe:
      # When a probe fails, Kubernetes will try failureThreshold times before giving up
      failureThreshold: 2
      # Number of seconds after the container has started before probe initiates
      initialDelaySeconds: 5
      # How often (in seconds) to perform the probe
      periodSeconds: 5
      # Minimum consecutive successes for the probe to be considered successful after having failed
      successThreshold: 1
      # Number of seconds after which the probe times out.
      timeoutSeconds: 3

    # Enables debug logging.
    debug: false

    # Pass arbitrary additional arguments to vault-csi-provider.
    # See https://www.vaultproject.io/docs/platform/k8s/csi/configurations#command-line-arguments
    # for the available command line flags.
    extraArgs: []

  # Vault is able to collect and publish various runtime metrics.
  # Enabling this feature requires adding a `telemetry{}` stanza to
  # the Vault configuration. There are a few examples included in the `config` sections above.
  #
  # For more information see:
  # https://www.vaultproject.io/docs/configuration/telemetry
  # https://www.vaultproject.io/docs/internals/telemetry
  serverTelemetry:
    # Enable support for the Prometheus Operator. Currently, this chart does not support
    # authenticating to Vault's metrics endpoint, so the following `telemetry{}` stanza must
    # be included in the `listener "tcp"{}` stanza:
    #   telemetry {
    #     unauthenticated_metrics_access = "true"
    #   }
    #
    # See the `standalone.config` for a more complete example of this.
    #
    # In addition, a top level `telemetry{}` stanza must also be included in the Vault
    # configuration, for example:
    #   telemetry {
    #     prometheus_retention_time = "30s",
    #     disable_hostname = true
    #   }
    #
    # Configuration for monitoring the Vault server.
    serviceMonitor:
      # The Prometheus operator *must* be installed before enabling this feature;
      # if it is not, the chart will fail to install due to missing CustomResourceDefinitions
      # provided by the operator.
      #
      # Instructions on how to install the Helm chart can be found here:
      # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
      # More information can be found here:
      # https://github.com/prometheus-operator/prometheus-operator
      # https://github.com/prometheus-operator/kube-prometheus

      # Enable deployment of the Vault Server ServiceMonitor CustomResource.
      enabled: false

      # Selector labels to add to the ServiceMonitor.
      # When empty, defaults to:
      #   release: prometheus
      selectors: {}

      # Interval at which Prometheus scrapes metrics
      interval: 30s

      # Timeout for Prometheus scrapes
      scrapeTimeout: 10s

    prometheusRules:
      # The Prometheus operator *must* be installed before enabling this feature;
      # if it is not, the chart will fail to install due to missing CustomResourceDefinitions
      # provided by the operator.
      #
      # Deploy the PrometheusRule custom resource for AlertManager-based alerts.
      # Requires that AlertManager is properly deployed.
      enabled: false

      # Selector labels to add to the PrometheusRules.
      # When empty, defaults to:
      #   release: prometheus
      selectors: {}

      # Some example rules.
      rules: {}
      # - alert: vault-HighResponseTime
      #   annotations:
      #     message: The response time of Vault is over 500ms on average over the last 5 minutes.
      #   expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500
      #   for: 5m
      #   labels:
      #     severity: warning
      # - alert: vault-HighResponseTime
      #   annotations:
      #     message: The response time of Vault is over 1s on average over the last 5 minutes.
      #   expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000
      #   for: 5m
      #   labels:
      #     severity: critical