Merge R3 into master
diff --git a/ric-aux/80-Auxiliary-Functions/helm/mc-stack/charts/elasticsearch/values.yaml b/ric-aux/80-Auxiliary-Functions/helm/mc-stack/charts/elasticsearch/values.yaml
deleted file mode 100755 (executable)
index a127659..0000000
--- a/ric-aux/80-Auxiliary-Functions/helm/mc-stack/charts/elasticsearch/values.yaml
+++ /dev/null
@@ -1,228 +0,0 @@
-################################################################################
-#   Copyright (c) 2019 AT&T Intellectual Property.                             #
-#                                                                              #
-#   Licensed under the Apache License, Version 2.0 (the "License");            #
-#   you may not use this file except in compliance with the License.           #
-#   You may obtain a copy of the License at                                    #
-#                                                                              #
-#       http://www.apache.org/licenses/LICENSE-2.0                             #
-#                                                                              #
-#   Unless required by applicable law or agreed to in writing, software        #
-#   distributed under the License is distributed on an "AS IS" BASIS,          #
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
-#   See the License for the specific language governing permissions and        #
-#   limitations under the License.                                             #
-################################################################################
-
----
-clusterName: "elasticsearch"
-nodeGroup: "master"
-
-# The service that non-master groups will try to connect to when joining the cluster
-# This should be set to clusterName + "-" + nodeGroup for your master group
-masterService: ""
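-# e.g. with the default clusterName and nodeGroup above this would be:
-#  masterService: "elasticsearch-master"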
-
-# Elasticsearch roles that will be applied to this nodeGroup
-# These will be set as environment variables. E.g. node.master=true
-roles:
-  master: "true"
-  ingest: "true"
-  data: "true"
-
-replicas: 1
-minimumMasterNodes: 1
-
-esMajorVersion: ""
-
-# Allows you to add any config files in /usr/share/elasticsearch/config/
-# such as elasticsearch.yml and log4j2.properties
-esConfig: {}
-#  elasticsearch.yml: |
-#    key:
-#      nestedkey: value
-#  log4j2.properties: |
-#    key = value
-
-# Extra environment variables to append to this nodeGroup
-# This will be appended to the current 'env:' key. You can use any of the kubernetes env
-# syntax here
-extraEnvs: []
-#  - name: MY_ENVIRONMENT_VAR
-#    value: the_value_goes_here
-
-# A list of secrets and their paths to mount inside the pod
-# This is useful for mounting certificates for security and for mounting
-# the X-Pack license
-secretMounts: []
-#  - name: elastic-certificates
-#    secretName: elastic-certificates
-#    path: /usr/share/elasticsearch/config/certs
-
-image: "docker.elastic.co/elasticsearch/elasticsearch"
-imageTag: "7.3.0"
-imagePullPolicy: "IfNotPresent"
-
-podAnnotations: {}
-  # iam.amazonaws.com/role: es-cluster
-
-# additional labels
-labels: {}
-
-esJavaOpts: "-Xmx1g -Xms1g"
-
-resources:
-  requests:
-    cpu: "100m"
-    memory: "2Gi"
-  limits:
-    cpu: "1000m"
-    memory: "2Gi"
-
-initResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-sidecarResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-networkHost: "0.0.0.0"
-
-volumeClaimTemplate:
-  accessModes: [ "ReadWriteOnce" ]
-  resources:
-    requests:
-      storage: 30Gi
-
-persistence:
-  enabled: false
-  annotations: {}
-
-extraVolumes: []
-  # - name: extras
-  #   emptyDir: {}
-
-extraVolumeMounts: []
-  # - name: extras
-  #   mountPath: /usr/share/extras
-  #   readOnly: true
-
-extraInitContainers: []
-  # - name: do-something
-  #   image: busybox
-  #   command: ['do', 'something']
-
-# This is the PriorityClass setting as defined in
-# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-priorityClassName: ""
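-# e.g. (the class name below is only a placeholder; the PriorityClass must already exist in the cluster):
-#  priorityClassName: "high-priority"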
-
-# By default this will make sure two pods don't end up on the same node
-# Changing this to a region would allow you to spread pods across regions
-antiAffinityTopologyKey: "kubernetes.io/hostname"
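-# e.g. to spread pods across zones instead of hosts (the exact label depends on
-# your Kubernetes version and cloud provider):
-#  antiAffinityTopologyKey: "failure-domain.beta.kubernetes.io/zone"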
-
-# Hard means that by default pods will only be scheduled if there are enough nodes for them
-# and that they will never end up on the same node. Setting this to "soft" makes this "best effort"
-antiAffinity: "hard"
-
-# These are the node affinity settings as defined in
-# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
-nodeAffinity: {}
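-# A minimal illustrative example pinning pods to Linux nodes via the well-known
-# kubernetes.io/os label:
-#  requiredDuringSchedulingIgnoredDuringExecution:
-#    nodeSelectorTerms:
-#    - matchExpressions:
-#      - key: "kubernetes.io/os"
-#        operator: In
-#        values:
-#        - "linux"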
-
-# The default is to deploy all pods serially. By setting this to "parallel", all pods are started at
-# the same time when bootstrapping the cluster
-podManagementPolicy: "Parallel"
-
-protocol: http
-httpPort: 9200
-transportPort: 9300
-
-service:
-  type: ClusterIP
-  nodePort:
-  annotations: {}
-
-updateStrategy: RollingUpdate
-
-# This is the max unavailable setting for the pod disruption budget
-# The default value of 1 will make sure that kubernetes won't allow more than 1
-# of your pods to be unavailable during maintenance
-maxUnavailable: 1
-
-podSecurityContext:
-  fsGroup: 1000
-
-# The following value is deprecated,
-# please use the above podSecurityContext.fsGroup instead
-fsGroup: ""
-
-securityContext:
-  capabilities:
-    drop:
-    - ALL
-  # readOnlyRootFilesystem: true
-  runAsNonRoot: true
-  runAsUser: 1000
-
-# How long to wait for elasticsearch to stop gracefully
-terminationGracePeriod: 120
-
-sysctlVmMaxMapCount: 262144
-
-readinessProbe:
-  failureThreshold: 3
-  initialDelaySeconds: 10
-  periodSeconds: 10
-  successThreshold: 3
-  timeoutSeconds: 5
-
-# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
-clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
-
-## Use an alternate scheduler.
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-schedulerName: ""
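-# e.g. (assuming a second scheduler named "my-scheduler" has been deployed):
-#  schedulerName: "my-scheduler"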
-
-imagePullSecrets: []
-nodeSelector: {}
-tolerations: []
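-# Illustrative examples only; the node label and taint below are placeholders:
-#  nodeSelector:
-#    disktype: "ssd"
-#  tolerations:
-#  - key: "dedicated"
-#    operator: "Equal"
-#    value: "elasticsearch"
-#    effect: "NoSchedule"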
-
-# Enabling this will publicly expose your Elasticsearch instance.
-# Only enable this if you have security enabled on your cluster
-ingress:
-  enabled: false
-  annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
-  path: /
-  hosts:
-    - chart-example.local
-  tls: []
-  #  - secretName: chart-example-tls
-  #    hosts:
-  #      - chart-example.local
-
-nameOverride: ""
-fullnameOverride: ""
-
-# https://github.com/elastic/helm-charts/issues/63
-masterTerminationFix: false
-
-lifecycle: {}
-  # preStop:
-  #   exec:
-  #     command: ["/bin/sh", "-c", "echo Hello from the preStop handler > /usr/share/message"]
-  # postStart:
-  #   exec:
-  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
-
-sysctlInitContainer:
-  enabled: true