################################################################################
#   Copyright (c) 2019 AT&T Intellectual Property.                             #
#                                                                              #
#   Licensed under the Apache License, Version 2.0 (the "License");            #
#   you may not use this file except in compliance with the License.           #
#   You may obtain a copy of the License at                                    #
#                                                                              #
#       http://www.apache.org/licenses/LICENSE-2.0                             #
#                                                                              #
#   Unless required by applicable law or agreed to in writing, software        #
#   distributed under the License is distributed on an "AS IS" BASIS,          #
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
#   See the License for the specific language governing permissions and        #
#   limitations under the License.                                             #
################################################################################
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "uname" . }}
  labels:
    heritage: {{ .Release.Service | quote }}
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app: "{{ template "uname" . }}"
    {{- range $key, $value := .Values.labels }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  annotations:
    esMajorVersion: "{{ include "esMajorVersion" . }}"
spec:
  serviceName: {{ template "uname" . }}-headless
  selector:
    matchLabels:
      app: "{{ template "uname" . }}"
  replicas: {{ .Values.replicas }}
  podManagementPolicy: {{ .Values.podManagementPolicy }}
  updateStrategy:
    type: {{ .Values.updateStrategy }}
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
  - metadata:
      name: {{ template "uname" . }}
      {{- with .Values.persistence.annotations }}
      annotations:
{{ toYaml . | indent 8 }}
      {{- end }}
    spec:
{{ toYaml .Values.volumeClaimTemplate | indent 6 }}
  {{- end }}
  template:
    metadata:
      name: "{{ template "uname" . }}"
      labels:
        heritage: {{ .Release.Service | quote }}
        release: {{ .Release.Name | quote }}
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        app: "{{ template "uname" . }}"
      annotations:
        {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
        {{/* This forces a pod restart if the configmap has changed */}}
        {{- if .Values.esConfig }}
        configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
        {{- end }}
    spec:
      {{- if .Values.schedulerName }}
      schedulerName: "{{ .Values.schedulerName }}"
      {{- end }}
      securityContext:
{{ toYaml .Values.podSecurityContext | indent 8 }}
        {{- if .Values.fsGroup }}
        fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup
        {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 6 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }}
      affinity:
      {{- end }}
      {{- if eq .Values.antiAffinity "hard" }}
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - "{{ template "uname" . }}"
            topologyKey: {{ .Values.antiAffinityTopologyKey }}
      {{- else if eq .Values.antiAffinity "soft" }}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              topologyKey: {{ .Values.antiAffinityTopologyKey }}
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - "{{ template "uname" . }}"
      {{- end }}
      {{- with .Values.nodeAffinity }}
        nodeAffinity:
{{ toYaml . | indent 10 }}
      {{- end }}
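      {{/*
        Illustrative values.yaml fragment for the scheduling block above.
        The keys are the ones this template references; the settings shown
        are example assumptions, not the chart's shipped defaults:

          antiAffinity: "hard"
          antiAffinityTopologyKey: "kubernetes.io/hostname"
          priorityClassName: ""
          nodeAffinity: {}
      */}}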
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
      volumes:
        {{- range .Values.secretMounts }}
        - name: {{ .name }}
          secret:
            secretName: {{ .secretName }}
        {{- end }}
        {{- if .Values.esConfig }}
        - name: esconfig
          configMap:
            name: {{ template "uname" . }}-config
        {{- end }}
      {{- if .Values.extraVolumes }}
{{ tpl .Values.extraVolumes . | indent 6 }}
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      initContainers:
      {{- if .Values.sysctlInitContainer.enabled }}
      # Elasticsearch requires a raised vm.max_map_count; this privileged init
      # container sets it on the host before the main container starts.
      - name: configure-sysctl
        securityContext:
          runAsUser: 0
          privileged: true
        image: "{{ .Values.image }}:{{ .Values.imageTag }}"
        command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount }}"]
        resources:
{{ toYaml .Values.initResources | indent 10 }}
      {{- end }}
      {{- if .Values.extraInitContainers }}
{{ tpl .Values.extraInitContainers . | indent 6 }}
      {{- end }}
      containers:
      - name: "{{ template "name" . }}"
        securityContext:
{{ toYaml .Values.securityContext | indent 10 }}
        image: "{{ .Values.image }}:{{ .Values.imageTag }}"
        imagePullPolicy: "{{ .Values.imagePullPolicy }}"
        readinessProbe:
{{ toYaml .Values.readinessProbe | indent 10 }}
          exec:
            command:
              - sh
              - -c
              - |
                #!/usr/bin/env bash -e
                # If the node is starting up, wait for the cluster to be ready
                # (request params: '{{ .Values.clusterHealthCheckParams }}').
                # Once it has started, only check that the node itself is responding.
                START_FILE=/tmp/.es_start_file

                http () {
                    local path="${1}"
                    if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
                      BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
                    else
                      BASIC_AUTH=''
                    fi
                    curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}
                }

                if [ -f "${START_FILE}" ]; then
                    echo 'Elasticsearch is already running, checking that the node is healthy'
                    http "/"
                else
                    echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}")'
                    if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" ; then
                        touch ${START_FILE}
                        exit 0
                    else
                        echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}")'
                        exit 1
                    fi
                fi
        ports:
        - name: http
          containerPort: {{ .Values.httpPort }}
        - name: transport
          containerPort: {{ .Values.transportPort }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        env:
          - name: node.name
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          {{- if eq .Values.roles.master "true" }}
          {{- if ge (int (include "esMajorVersion" .)) 7 }}
          - name: cluster.initial_master_nodes
            value: "{{ template "endpoints" .Values }}"
          {{- else }}
          - name: discovery.zen.minimum_master_nodes
            value: "{{ .Values.minimumMasterNodes }}"
          {{- end }}
          {{- end }}
          {{- if lt (int (include "esMajorVersion" .)) 7 }}
          - name: discovery.zen.ping.unicast.hosts
            value: "{{ template "masterService" . }}-headless"
          {{- else }}
          - name: discovery.seed_hosts
            value: "{{ template "masterService" . }}-headless"
          {{- end }}
          - name: cluster.name
            value: "{{ .Values.clusterName }}"
          - name: network.host
            value: "{{ .Values.networkHost }}"
          - name: ES_JAVA_OPTS
            value: "{{ .Values.esJavaOpts }}"
          {{- range $role, $enabled := .Values.roles }}
          - name: node.{{ $role }}
            value: "{{ $enabled }}"
          {{- end }}
          {{- if .Values.extraEnvs }}
{{ toYaml .Values.extraEnvs | indent 10 }}
          {{- end }}
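        {{/*
          Discovery recap for the env block above: on Elasticsearch 7+ it sets
          cluster.initial_master_nodes (master-eligible nodes only) and
          discovery.seed_hosts; on 6.x it falls back to
          discovery.zen.minimum_master_nodes and discovery.zen.ping.unicast.hosts.
          Both discovery variables point at the headless service derived from the
          "masterService" helper, so a cluster named "elasticsearch" would yield
          "elasticsearch-master-headless" (example name assumes the upstream
          elastic/helm-charts naming convention).
        */}}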
}}" mountPath: /usr/share/elasticsearch/data {{- end }} {{- range .Values.secretMounts }} - name: {{ .name }} mountPath: {{ .path }} {{- if .subPath }} subPath: {{ .subPath }} {{- end }} {{- end }} {{- range $path, $config := .Values.esConfig }} - name: esconfig mountPath: /usr/share/elasticsearch/config/{{ $path }} subPath: {{ $path }} {{- end -}} {{- if .Values.extraVolumeMounts }} {{ tpl .Values.extraVolumeMounts . | indent 10 }} {{- end }} {{- if .Values.masterTerminationFix }} {{- if eq .Values.roles.master "true" }} # This sidecar will prevent slow master re-election # https://github.com/elastic/helm-charts/issues/63 - name: elasticsearch-master-graceful-termination-handler image: "{{ .Values.image }}:{{ .Values.imageTag }}" imagePullPolicy: "{{ .Values.imagePullPolicy }}" command: - "sh" - -c - | #!/usr/bin/env bash set -eo pipefail http () { local path="${1}" if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" else BASIC_AUTH='' fi curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ template "masterService" . }}:{{ .Values.httpPort }}${path} } cleanup () { while true ; do local master="$(http "/_cat/master?h=node" || echo "")" if [[ $master == "{{ template "masterService" . }}"* && $master != "${NODE_NAME}" ]]; then echo "This node is not master." break fi echo "This node is still master, waiting gracefully for it to step down" sleep 1 done exit 0 } trap cleanup SIGTERM sleep infinity & wait $! resources: {{ toYaml .Values.sidecarResources | indent 10 }} env: - name: NODE_NAME valueFrom: fieldRef: fieldPath: metadata.name {{- if .Values.extraEnvs }} {{ toYaml .Values.extraEnvs | indent 10 }} {{- end }} {{- end }} {{- end }} {{- if .Values.lifecycle }} lifecycle: {{ toYaml .Values.lifecycle | indent 10 }} {{- end }}