################################################################################
# Copyright (c) 2019 AT&T Intellectual Property.                              #
#                                                                              #
# Licensed under the Apache License, Version 2.0 (the "License");              #
# you may not use this file except in compliance with the License.            #
# You may obtain a copy of the License at                                      #
#                                                                              #
#          http://www.apache.org/licenses/LICENSE-2.0                          #
#                                                                              #
# Unless required by applicable law or agreed to in writing, software          #
# distributed under the License is distributed on an "AS IS" BASIS,            #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.     #
# See the License for the specific language governing permissions and          #
# limitations under the License.                                               #
################################################################################
---
elasticsearchURL: "" # "http://elasticsearch-master:9200"
elasticsearchHosts: "http://elasticsearch-master:9200"

replicas: 1

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: XPACK_SECURITY_ENABLED
#    value: "false"
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
#  - name: kibana-keystore
#    secretName: kibana-keystore
#    path: /usr/share/kibana/data/kibana.keystore
#    subPath: kibana.keystore # optional

image: "docker.elastic.co/kibana/kibana-oss"
imageTag: "7.3.0"
imagePullPolicy: "IfNotPresent"

# additional labels
labels: {}

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

resources:
  requests:
    cpu: "100m"
    memory: "500Mi"
  limits:
    cpu: "1000m"
    memory: "1Gi"

protocol: http

serverHost: "0.0.0.0"

healthCheckPath: "/app/kibana"

# Allows you to add any config files in /usr/share/kibana/config/
# such as kibana.yml
kibanaConfig: {}
#  kibana.yml: |
#    key:
#      nestedkey: value

#kibanaConfig:
#  kibana.yml: |
#    xpack.security.enabled: false

# If a Pod Security Policy is in use, it may be necessary to specify a security context as well as a service account
podSecurityContext:
  fsGroup: 1000

securityContext:
  capabilities:
    drop:
    - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

serviceAccount: ""

# This is the PriorityClass setting as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

httpPort: 5601

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

updateStrategy:
  type: "Recreate"

service:
  type: ClusterIP
  port: 5601
  nodePort:
  annotations: {}
    # cloud.google.com/load-balancer-type: "Internal"
    # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
    # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
    # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

imagePullSecrets: []
nodeSelector: {}
tolerations: []
affinity: {}

nameOverride: ""
fullnameOverride: ""
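
# Example override (illustrative sketch, not a chart default): the commented
# values below show how Kibana could be pointed at a different Elasticsearch
# service and exposed outside the cluster via a NodePort and an NGINX ingress.
# The service name "elasticsearch-data", the host "kibana.example.local", and
# the nodePort 30601 are assumptions for illustration only; adjust them to your
# environment before uncommenting or passing them as a values override
# (helm install -f).
#
# elasticsearchHosts: "http://elasticsearch-data:9200"
# service:
#   type: NodePort
#   nodePort: 30601
# ingress:
#   enabled: true
#   annotations:
#     kubernetes.io/ingress.class: nginx
#   path: /
#   hosts:
#     - kibana.example.local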