---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"

replicas: 3
minimumMasterNodes: 2

esMajorVersion: ""

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig:
  elasticsearch.yml: |
    # path.repo: "/usr/share/elasticsearch/myBackup"
#  log4j2.properties: |
#    key = value

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
#  - secretRef:
#      name: env-secret
#  - configMapRef:
#      name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755

image: "69.172.74.253:8080/elk/elasticsearch"
imageTag: "7.7.1"
imagePullPolicy: "IfNotPresent"

podAnnotations: {}
#  iam.amazonaws.com/role: es-cluster

# additionals labels
labels: {}

esJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

initResources: {}
#  limits:
#    cpu: "25m"
#    # memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

sidecarResources: {}
#  limits:
#    cpu: "25m"
#    # memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  volumeMode: Filesystem
  storageClassName: local-elasticsearch
  resources:
    requests:
      storage: 3Gi

rbac:
  create: false
  serviceAccountName: ""

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: true
  annotations: {}
  # annotations: {volume.beta.kubernetes.io/storage-class: "nfs-client"}

# NOTE(review): `pvc` is a site-specific addition not present in the upstream
# chart; its nesting was lost in the collapsed source — it may belong under
# `persistence:` instead. Verify against the chart's templates.
pvc:
  enabled: false
  name: elasticsearch-pvc

extraVolumes: []
#  - name: extras
#    emptyDir: {}

extraVolumeMounts: []
#  - name: extras
#    mountPath: /usr/share/extras
#    readOnly: true

extraContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

extraInitContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true

protocol: http
httpPort: 9200
transportPort: 9300

service:
  labels: {}
  labelsHeadless: {}
  type: NodePort
  nodePort: 32060
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/7.8/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets:
  - name: registry-secret
nodeSelector: {}
tolerations: []

# Enabling this will publically expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""

# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false

lifecycle: {}
#  preStop:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
#  postStart:
#    exec:
#      command:
#        - bash
#        - -c
#        - |
#          #!/bin/bash
#          # Add a template to adjust number of shards/replicas
#          TEMPLATE_NAME=my_template
#          INDEX_PATTERN="logstash-*"
#          SHARD_COUNT=8
#          REPLICA_COUNT=1
#          ES_URL=http://localhost:9200
#          while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
#          curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'

sysctlInitContainer:
  enabled: true

keystore: []

# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""