Merge "Update A1 Mediator version to 2.1.9"
author	Lusheng Ji <lji@research.att.com>
Wed, 27 May 2020 11:20:16 +0000 (11:20 +0000)
committer	Gerrit Code Review <gerrit@o-ran-sc.org>
Wed, 27 May 2020 11:20:16 +0000 (11:20 +0000)
141 files changed:
bin/install
helm/appmgr/templates/serviceaccount.yaml
helm/infrastructure/requirements.yaml
helm/infrastructure/subcharts/danm-networks/templates/danm.yaml
helm/infrastructure/subcharts/kong/.helmignore [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/Chart.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/FAQs.md [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/README.md [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/charts/postgresql/.helmignore [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/Chart.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/README.md [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/ci/default-values.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/ci/shmvolume-disabled-values.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/files/README.md [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/files/conf.d/README.md [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/files/docker-entrypoint-initdb.d/README.md [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/NOTES.txt [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/_helpers.tpl [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/configmap.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/extended-config-configmap.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/initialization-configmap.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-configmap.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-svc.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/networkpolicy.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/prometheusrule.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/secrets.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/serviceaccount.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/servicemonitor.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset-slaves.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-headless.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-read.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/values-production.yaml [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/values.schema.json [deleted file]
helm/infrastructure/subcharts/kong/charts/postgresql/values.yaml [deleted file]
helm/infrastructure/subcharts/kong/ci/default-values.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/ci/test1-values.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/ci/test2-values.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/ci/test3-values.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/requirements.yaml [deleted file]
helm/infrastructure/subcharts/kong/templates/NOTES.txt [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/_helpers.tpl [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/admission-webhook.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/config-custom-server-blocks.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/config-dbless.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/controller-rbac-resources.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/controller-service-account.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/custom-resource-definitions.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/deployment.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/ingress-admin.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/ingress-manager.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/ingress-portal-api.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/ingress-portal.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/ingress-proxy.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/migrations-post-upgrade.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/migrations-pre-upgrade.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/migrations.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/pdb.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/psp.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/service-kong-admin.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/service-kong-manager.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/service-kong-portal-api.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/service-kong-portal.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/service-kong-proxy.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/templates/servicemonitor.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/kong/values.yaml [changed mode: 0755->0644]
helm/infrastructure/subcharts/prometheus/.helmignore [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/Chart.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/README.md [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/.helmignore [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/Chart.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/OWNERS [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/README.md [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/NOTES.txt [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/deployment.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/service.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/values.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/requirements.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/NOTES.txt [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/_helpers.tpl [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrole.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-configmap.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-deployment.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-ingress.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-networkpolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-pdb.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-podsecuritypolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-pvc.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-service-headless.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-service.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-serviceaccount.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/alertmanager-statefulset.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-daemonset.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-podsecuritypolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-role.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-rolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-service.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/node-exporter-serviceaccount.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrole.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-deployment.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-ingress.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-networkpolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-pdb.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-podsecuritypolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-pvc.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-service.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/pushgateway-serviceaccount.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-clusterrole.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-clusterrolebinding.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-configmap.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-deployment.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-ingress.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-networkpolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-pdb.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-podsecuritypolicy.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-pvc.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-service-headless.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-service.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-serviceaccount.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-statefulset.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/templates/server-vpa.yaml [new file with mode: 0644]
helm/infrastructure/subcharts/prometheus/values.yaml [new file with mode: 0644]
helm/infrastructure/templates/deployment-tiller.yaml [changed mode: 0644->0755]
helm/infrastructure/templates/job-tiller-secrets.yaml [changed mode: 0644->0755]
helm/infrastructure/values.yaml
helm/submgr/templates/configmap.yaml
helm/submgr/templates/deployment.yaml
helm/submgr/templates/env.yaml

index 2a2424f..eb95d93 100755 (executable)
@@ -160,6 +160,12 @@ rules:
   - apiGroups: [""]
     resources: ["nodes"]
     verbs: ["list", "watch", "get"]
+  - apiGroups: [""]
+    resources: ["nodes/metrics"]
+    verbs: ["list", "watch", "get"]
+  - apiGroups: [""]
+    resources: ["nodes/proxy"]
+    verbs: ["list", "watch", "get"]
   - apiGroups: ["configuration.konghq.com"]
     resources: ["kongconsumers"]
     verbs: ["get", "list", "watch"]
@@ -175,6 +181,9 @@ rules:
   - apiGroups: ["networking.k8s.io"]
     resources: ["ingresses"]
     verbs: ["watch", "list", "get", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["ingresses"]
+    verbs: ["watch", "list", "get", "create", "delete", "update"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
     verbs: ["watch", "list", "get", "create", "delete"]
@@ -183,12 +192,19 @@ rules:
     verbs: ["watch", "list", "get", "create", "delete"]
   - apiGroups: ["extensions"]
     resources: ["ingresses/status"]
-    verbs: ["update"]
+    verbs: ["update", "get", "list", "watch"]
   - apiGroups: ["networking.k8s.io"]
     resources: ["ingresses/status"]
-    verbs: ["update"]
+    verbs: ["update", "get", "list", "watch"]
+  - apiGroups: ["certificates.k8s.io"]
+    resources: ["certificatesigningrequests"]
+    verbs: ["list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["list", "watch"]
+  - nonResourceURLs: ["/metrics"]
+    verbs: ["get"]
 ---
-
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
@@ -263,7 +279,7 @@ fi
 for component in $COMPONENTS; do
     helm dep up $DIR/../helm/$component
     helm install -f $OVERRIDEYAML --namespace "${PLTNAMESPACE:-ricplt}" --name "${RELEASE_PREFIX}-$component" $DIR/../helm/$component
-    sleep 3
+    sleep 8
 done
 
 
index c873e30..407fb1f 100644 (file)
@@ -65,7 +65,7 @@ metadata:
   namespace: {{ include "common.tillerNameSpace" $ctx }}
 rules:
 - apiGroups: [""]
-  resources: ["configmaps", "endpoints"]
+  resources: ["configmaps", "endpoints", "services"]
   verbs: ["get", "list", "create", "update", "delete"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -80,4 +80,4 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ include "common.serviceaccountname.appmgr" . }}
-    namespace: {{ include "common.namespace.platform" . }}
\ No newline at end of file
+    namespace: {{ include "common.namespace.platform" . }}
index b1ab3ea..b2fbfdb 100644 (file)
@@ -39,3 +39,7 @@ dependencies:
     version: 1.0.0
     repository: "file://./subcharts/danm-networks"
     condition: danm-networks.enabled
+  - name: prometheus
+    version: 11.3.0
+    repository: "file://./subcharts/prometheus"
+    condition: prometheus.enabled
index d52823e..672b6ec 100644 (file)
@@ -39,7 +39,7 @@ spec:
 {{- if .net6 }}
     net6: {{ .net6 }}
 {{- end }}
-{{- if .routes }}
+{{- if .routes6 }}
     routes6:
 {{- range $subnet, $route := .routes6 }}
       {{ $subnet | quote }}: {{ $route | quote }}
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/.helmignore b/helm/infrastructure/subcharts/kong/charts/postgresql/.helmignore
deleted file mode 100755 (executable)
index a1c17ae..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-.git
-OWNERS
\ No newline at end of file
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/Chart.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/Chart.yaml
deleted file mode 100755 (executable)
index 2f67b5e..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-apiVersion: v1
-appVersion: 11.6.0
-description: Chart for PostgreSQL, an object-relational database management system
-  (ORDBMS) with an emphasis on extensibility and on standards-compliance.
-engine: gotpl
-home: https://www.postgresql.org/
-icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png
-keywords:
-- postgresql
-- postgres
-- database
-- sql
-- replication
-- cluster
-maintainers:
-- email: containers@bitnami.com
-  name: Bitnami
-- email: cedric@desaintmartin.fr
-  name: desaintmartin
-name: postgresql
-sources:
-- https://github.com/bitnami/bitnami-docker-postgresql
-version: 8.1.2
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/README.md b/helm/infrastructure/subcharts/kong/charts/postgresql/README.md
deleted file mode 100755 (executable)
index d3a66f9..0000000
+++ /dev/null
@@ -1,535 +0,0 @@
-# PostgreSQL
-
-[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
-
-For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha)
-
-## TL;DR;
-
-```console
-$ helm install stable/postgresql
-```
-
-## Introduction
-
-This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
-
-## Prerequisites
-
-- Kubernetes 1.12+
-- Helm 2.11+ or Helm 3.0-beta3+
-- PV provisioner support in the underlying infrastructure
-
-## Installing the Chart
-To install the chart with the release name `my-release`:
-
-```console
-$ helm install --name my-release stable/postgresql
-```
-
-The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
-
-> **Tip**: List all releases using `helm list`
-
-## Uninstalling the Chart
-
-To uninstall/delete the `my-release` deployment:
-
-```console
-$ helm delete my-release
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-
-## Parameters
-
-The following tables lists the configurable parameters of the PostgreSQL chart and their default values.
-
-|                   Parameter                   |                                                                                Description                                                                                |                            Default                            |
-|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------|
-| `global.imageRegistry`                        | Global Docker Image registry                                                                                                                                              | `nil`                                                         |
-| `global.postgresql.postgresqlDatabase`        | PostgreSQL database (overrides `postgresqlDatabase`)                                                                                                                      | `nil`                                                         |
-| `global.postgresql.postgresqlUsername`        | PostgreSQL username (overrides `postgresqlUsername`)                                                                                                                      | `nil`                                                         |
-| `global.postgresql.existingSecret`            | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`)                                                                                      | `nil`                                                         |
-| `global.postgresql.postgresqlPassword`        | PostgreSQL admin password (overrides `postgresqlPassword`)                                                                                                                | `nil`                                                         |
-| `global.postgresql.servicePort`               | PostgreSQL port (overrides `service.port`)                                                                                                                                | `nil`                                                         |
-| `global.postgresql.replicationPassword`       | Replication user password (overrides `replication.password`)                                                                                                              | `nil`                                                         |
-| `global.imagePullSecrets`                     | Global Docker registry secret names as an array                                                                                                                           | `[]` (does not add image pull secrets to deployed pods)       |
-| `global.storageClass`                         | Global storage class for dynamic provisioning                                                                                                                             | `nil`                                                         |
-| `image.registry`                              | PostgreSQL Image registry                                                                                                                                                 | `docker.io`                                                   |
-| `image.repository`                            | PostgreSQL Image name                                                                                                                                                     | `bitnami/postgresql`                                          |
-| `image.tag`                                   | PostgreSQL Image tag                                                                                                                                                      | `{TAG_NAME}`                                                  |
-| `image.pullPolicy`                            | PostgreSQL Image pull policy                                                                                                                                              | `IfNotPresent`                                                |
-| `image.pullSecrets`                           | Specify Image pull secrets                                                                                                                                                | `nil` (does not add image pull secrets to deployed pods)      |
-| `image.debug`                                 | Specify if debug values should be set                                                                                                                                     | `false`                                                       |
-| `nameOverride`                                | String to partially override postgresql.fullname template with a string (will prepend the release name)                                                                   | `nil`                                                         |
-| `fullnameOverride`                            | String to fully override postgresql.fullname template with a string                                                                                                       | `nil`                                                         |
-| `volumePermissions.image.registry`            | Init container volume-permissions image registry                                                                                                                          | `docker.io`                                                   |
-| `volumePermissions.image.repository`          | Init container volume-permissions image name                                                                                                                              | `bitnami/minideb`                                             |
-| `volumePermissions.image.tag`                 | Init container volume-permissions image tag                                                                                                                               | `stretch`                                                     |
-| `volumePermissions.image.pullPolicy`          | Init container volume-permissions image pull policy                                                                                                                       | `Always`                                                      |
-| `volumePermissions.securityContext.runAsUser` | User ID for the init container                                                                                                                                            | `0`                                                           |
-| `usePasswordFile`                             | Have the secrets mounted as a file instead of env vars                                                                                                                    | `false`                                                       |
-| `ldap.enabled`                                | Enable LDAP support                                                                                                                                                       | `false`                                                       |
-| `ldap.existingSecret`                         | Name of existing secret to use for LDAP passwords                                                                                                                         | `nil`                                                         |
-| `ldap.url`                                    | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]`                                                                          | `nil`                                                         |
-| `ldap.server`                                 | IP address or name of the LDAP server.                                                                                                                                    | `nil`                                                         |
-| `ldap.port`                                   | Port number on the LDAP server to connect to                                                                                                                              | `nil`                                                         |
-| `ldap.scheme`                                 | Set to `ldaps` to use LDAPS.                                                                                                                                              | `nil`                                                         |
-| `ldap.tls`                                    | Set to `1` to use TLS encryption                                                                                                                                          | `nil`                                                         |
-| `ldap.prefix`                                 | String to prepend to the user name when forming the DN to bind                                                                                                            | `nil`                                                         |
-| `ldap.suffix`                                 | String to append to the user name when forming the DN to bind                                                                                                             | `nil`                                                         |
-| `ldap.search_attr`                            | Attribute to match agains the user name in the search                                                                                                                     | `nil`                                                         |
-| `ldap.search_filter`                          | The search filter to use when doing search+bind authentication                                                                                                            | `nil`                                                         |
-| `ldap.baseDN`                                 | Root DN to begin the search for the user in                                                                                                                               | `nil`                                                         |
-| `ldap.bindDN`                                 | DN of user to bind to LDAP                                                                                                                                                | `nil`                                                         |
-| `ldap.bind_password`                          | Password for the user to bind to LDAP                                                                                                                                     | `nil`                                                         |
-| `replication.enabled`                         | Enable replication                                                                                                                                                        | `false`                                                       |
-| `replication.user`                            | Replication user                                                                                                                                                          | `repl_user`                                                   |
-| `replication.password`                        | Replication user password                                                                                                                                                 | `repl_password`                                               |
-| `replication.slaveReplicas`                   | Number of slaves replicas                                                                                                                                                 | `1`                                                           |
-| `replication.synchronousCommit`               | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`                                                                      | `off`                                                         |
-| `replication.numSynchronousReplicas`          | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`.                                                      | `0`                                                           |
-| `replication.applicationName`                 | Cluster application name. Useful for advanced replication settings                                                                                                        | `my_application`                                              |
-| `existingSecret`                              | Name of existing secret to use for PostgreSQL passwords                                                                                                                   | `nil`                                                         |
-| `postgresqlPostgresPassword`                  | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)                                                                                              | _random 10 character alphanumeric string_                     |
-| `postgresqlUsername`                          | PostgreSQL admin user                                                                                                                                                     | `postgres`                                                    |
-| `postgresqlPassword`                          | PostgreSQL admin password                                                                                                                                                 | _random 10 character alphanumeric string_                     |
-| `postgresqlDatabase`                          | PostgreSQL database                                                                                                                                                       | `nil`                                                         |
-| `postgresqlDataDir`                           | PostgreSQL data dir folder                                                                                                                                                | `/bitnami/postgresql` (same value as persistence.mountPath)   |
-| `extraEnv`                                    | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template.                                                               | `[]`                                                          |
-| `postgresqlInitdbArgs`                        | PostgreSQL initdb extra arguments                                                                                                                                         | `nil`                                                         |
-| `postgresqlInitdbWalDir`                      | PostgreSQL location for transaction log                                                                                                                                   | `nil`                                                         |
-| `postgresqlConfiguration`                     | Runtime Config Parameters                                                                                                                                                 | `nil`                                                         |
-| `postgresqlExtendedConf`                      | Extended Runtime Config Parameters (appended to main or default configuration)                                                                                            | `nil`                                                         |
-| `pgHbaConfiguration`                          | Content of pg_hba.conf                                                                                                                                                    | `nil (do not create pg_hba.conf)`                             |
-| `configurationConfigMap`                      | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template.             | `nil`                                                         |
-| `extendedConfConfigMap`                       | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template.                                                                         | `nil`                                                         |
-| `initdbScripts`                               | Dictionary of initdb scripts                                                                                                                                              | `nil`                                                         |
-| `initdbUsername`                              | PostgreSQL user to execute the .sql and sql.gz scripts                                                                                                                    | `nil`                                                         |
-| `initdbPassword`                              | Password for the user specified in `initdbUsername`                                                                                                                       | `nil`                                                         |
-| `initdbScriptsConfigMap`                      | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template.                                                                | `nil`                                                         |
-| `initdbScriptsSecret`                         | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil`                                                         |
-| `service.type`                                | Kubernetes Service type                                                                                                                                                   | `ClusterIP`                                                   |
-| `service.port`                                | PostgreSQL port                                                                                                                                                           | `5432`                                                        |
-| `service.nodePort`                            | Kubernetes Service nodePort                                                                                                                                               | `nil`                                                         |
-| `service.annotations`                         | Annotations for PostgreSQL service, the value is evaluated as a template.                                                                                                 | {}                                                            |
-| `service.loadBalancerIP`                      | loadBalancerIP if service type is `LoadBalancer`                                                                                                                          | `nil`                                                         |
-| `service.loadBalancerSourceRanges`            | Address that are allowed when svc is LoadBalancer                                                                                                                         | []                                                            |
-| `schedulerName`                               | Name of the k8s scheduler (other than default)                                                                                                                            | `nil`                                                         |
-| `shmVolume.enabled`                           | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s)                                                                                                        | `true`                                                        |
-| `persistence.enabled`                         | Enable persistence using PVC                                                                                                                                              | `true`                                                        |
-| `persistence.existingClaim`                   | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template.                                                                                        | `nil`                                                         |
-| `persistence.mountPath`                       | Path to mount the volume at                                                                                                                                               | `/bitnami/postgresql`                                         |
-| `persistence.subPath`                         | Subdirectory of the volume to mount at                                                                                                                                    | `""`                                                          |
-| `persistence.storageClass`                    | PVC Storage Class for PostgreSQL volume                                                                                                                                   | `nil`                                                         |
-| `persistence.accessModes`                     | PVC Access Mode for PostgreSQL volume                                                                                                                                     | `[ReadWriteOnce]`                                             |
-| `persistence.size`                            | PVC Storage Request for PostgreSQL volume                                                                                                                                 | `8Gi`                                                         |
-| `persistence.annotations`                     | Annotations for the PVC                                                                                                                                                   | `{}`                                                          |
-| `master.nodeSelector`                         | Node labels for pod assignment (postgresql master)                                                                                                                        | `{}`                                                          |
-| `master.affinity`                             | Affinity labels for pod assignment (postgresql master)                                                                                                                    | `{}`                                                          |
-| `master.tolerations`                          | Toleration labels for pod assignment (postgresql master)                                                                                                                  | `[]`                                                          |
-| `master.anotations`                           | Map of annotations to add to the statefulset (postgresql master)                                                                                                          | `{}`                                                          |
-| `master.labels`                               | Map of labels to add to the statefulset (postgresql master)                                                                                                               | `{}`                                                          |
-| `master.podAnnotations`                       | Map of annotations to add to the pods (postgresql master)                                                                                                                 | `{}`                                                          |
-| `master.podLabels`                            | Map of labels to add to the pods (postgresql master)                                                                                                                      | `{}`                                                          |
-| `master.priorityClassName`                    | Priority Class to use for each pod (postgresql master)                                                                                                                    | `nil`                                                          |
-| `master.extraInitContainers`                  | Additional init containers to add to the pods (postgresql master)                                                                                                         | `[]`                                                          |
-| `master.extraVolumeMounts`                    | Additional volume mounts to add to the pods (postgresql master)                                                                                                           | `[]`                                                          |
-| `master.extraVolumes`                         | Additional volumes to add to the pods (postgresql master)                                                                                                                 | `[]`                                                          |
-| `slave.nodeSelector`                          | Node labels for pod assignment (postgresql slave)                                                                                                                         | `{}`                                                          |
-| `slave.affinity`                              | Affinity labels for pod assignment (postgresql slave)                                                                                                                     | `{}`                                                          |
-| `slave.tolerations`                           | Toleration labels for pod assignment (postgresql slave)                                                                                                                   | `[]`                                                          |
-| `slave.anotations`                            | Map of annotations to add to the statefulsets (postgresql slave)                                                                                                          | `{}`                                                          |
-| `slave.labels`                                | Map of labels to add to the statefulsets (postgresql slave)                                                                                                               | `{}`                                                          |
-| `slave.podAnnotations`                        | Map of annotations to add to the pods (postgresql slave)                                                                                                                  | `{}`                                                          |
-| `slave.podLabels`                             | Map of labels to add to the pods (postgresql slave)                                                                                                                       | `{}`                                                          |
-| `slave.priorityClassName`                     | Priority Class to use for each pod (postgresql slave)                                                                                                                     | `nil`                                                          |
-| `slave.extraInitContainers`                   | Additional init containers to add to the pods (postgresql slave)                                                                                                          | `[]`                                                          |
-| `slave.extraVolumeMounts`                     | Additional volume mounts to add to the pods (postgresql slave)                                                                                                            | `[]`                                                          |
-| `slave.extraVolumes`                          | Additional volumes to add to the pods (postgresql slave)                                                                                                                  | `[]`                                                          |
-| `terminationGracePeriodSeconds`               | Seconds the pod needs to terminate gracefully                                                                                                                             | `nil`                                                         |
-| `resources`                                   | CPU/Memory resource requests/limits                                                                                                                                       | Memory: `256Mi`, CPU: `250m`                                  |
-| `securityContext.enabled`                     | Enable security context                                                                                                                                                   | `true`                                                        |
-| `securityContext.fsGroup`                     | Group ID for the container                                                                                                                                                | `1001`                                                        |
-| `securityContext.runAsUser`                   | User ID for the container                                                                                                                                                 | `1001`                                                        |
-| `serviceAccount.enabled`                      | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set)                                                     | `false`                                                       |
-| `serviceAcccount.name`                        | Name of existing service account                                                                                                                                          | `nil`                                                         |
-| `livenessProbe.enabled`                       | Would you like a livenessProbe to be enabled                                                                                                                              | `true`                                                        |
-| `networkPolicy.enabled`                       | Enable NetworkPolicy                                                                                                                                                      | `false`                                                       |
-| `networkPolicy.allowExternal`                 | Don't require client label for connections                                                                                                                                | `true`                                                        |
-| `networkPolicy.explicitNamespacesSelector`    | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed                                                                    | `nil`                                                         |
-| `livenessProbe.initialDelaySeconds`           | Delay before liveness probe is initiated                                                                                                                                  | 30                                                            |
-| `livenessProbe.periodSeconds`                 | How often to perform the probe                                                                                                                                            | 10                                                            |
-| `livenessProbe.timeoutSeconds`                | When the probe times out                                                                                                                                                  | 5                                                             |
-| `livenessProbe.failureThreshold`              | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                                | 6                                                             |
-| `livenessProbe.successThreshold`              | Minimum consecutive successes for the probe to be considered successful after having failed                                                                               | 1                                                             |
-| `readinessProbe.enabled`                      | would you like a readinessProbe to be enabled                                                                                                                             | `true`                                                        |
-| `readinessProbe.initialDelaySeconds`          | Delay before readiness probe is initiated                                                                                                                                 | 5                                                             |
-| `readinessProbe.periodSeconds`                | How often to perform the probe                                                                                                                                            | 10                                                            |
-| `readinessProbe.timeoutSeconds`               | When the probe times out                                                                                                                                                  | 5                                                             |
-| `readinessProbe.failureThreshold`             | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                                | 6                                                             |
-| `readinessProbe.successThreshold`             | Minimum consecutive successes for the probe to be considered successful after having failed                                                                               | 1                                                             |
-| `metrics.enabled`                             | Start a prometheus exporter                                                                                                                                               | `false`                                                       |
-| `metrics.service.type`                        | Kubernetes Service type                                                                                                                                                   | `ClusterIP`                                                   |
-| `service.clusterIP`                           | Static clusterIP or None for headless services                                                                                                                            | `nil`                                                         |
-| `metrics.service.annotations`                 | Additional annotations for metrics exporter pod                                                                                                                           | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` |
-| `metrics.service.loadBalancerIP`              | loadBalancerIP if the metrics service type is `LoadBalancer`                                                                                                               | `nil`                                                         |
-| `metrics.serviceMonitor.enabled`              | Set this to `true` to create ServiceMonitor for Prometheus operator                                                                                                       | `false`                                                       |
-| `metrics.serviceMonitor.additionalLabels`     | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus                                                                                     | `{}`                                                          |
-| `metrics.serviceMonitor.namespace`            | Optional namespace in which to create ServiceMonitor                                                                                                                      | `nil`                                                         |
-| `metrics.serviceMonitor.interval`             | Scrape interval. If not set, the Prometheus default scrape interval is used                                                                                               | `nil`                                                         |
-| `metrics.serviceMonitor.scrapeTimeout`        | Scrape timeout. If not set, the Prometheus default scrape timeout is used                                                                                                 | `nil`                                                         |
-| `metrics.prometheusRule.enabled`              | Set this to true to create prometheusRules for Prometheus operator                                                                                                        | `false`                                                       |
-| `metrics.prometheusRule.additionalLabels`     | Additional labels that can be used so prometheusRules will be discovered by Prometheus                                                                                    | `{}`                                                          |
-| `metrics.prometheusRule.namespace`            | namespace where prometheusRules resource should be created                                                                                                                | the same namespace as postgresql                              |
-| `metrics.prometheusRule.rules`                | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example.                                            | `[]`                                                          |
-| `metrics.image.registry`                      | PostgreSQL Exporter image registry                                                                                                                                        | `docker.io`                                                   |
-| `metrics.image.repository`                    | PostgreSQL Exporter image name                                                                                                                                            | `bitnami/postgres-exporter`                                   |
-| `metrics.image.tag`                           | PostgreSQL Exporter image tag                                                                                                                                             | `{TAG_NAME}`                                                  |
-| `metrics.image.pullPolicy`                    | PostgreSQL Exporter image pull policy                                                                                                                                     | `IfNotPresent`                                                |
-| `metrics.image.pullSecrets`                   | Specify Image pull secrets                                                                                                                                                | `nil` (does not add image pull secrets to deployed pods)      |
-| `metrics.customMetrics`                       | Additional custom metrics                                                                                                                                                 | `nil`                                                         |
-| `metrics.securityContext.enabled`             | Enable security context for metrics                                                                                                                                       | `false`                                                       |
-| `metrics.securityContext.runAsUser`           | User ID for the container for metrics                                                                                                                                     | `1001`                                                        |
-| `metrics.livenessProbe.initialDelaySeconds`   | Delay before liveness probe is initiated                                                                                                                                  | 30                                                            |
-| `metrics.livenessProbe.periodSeconds`         | How often to perform the probe                                                                                                                                            | 10                                                            |
-| `metrics.livenessProbe.timeoutSeconds`        | When the probe times out                                                                                                                                                  | 5                                                             |
-| `metrics.livenessProbe.failureThreshold`      | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                                | 6                                                             |
-| `metrics.livenessProbe.successThreshold`      | Minimum consecutive successes for the probe to be considered successful after having failed                                                                               | 1                                                             |
-| `metrics.readinessProbe.enabled`              | Enable the readiness probe for the metrics container                                                                                                                      | `true`                                                        |
-| `metrics.readinessProbe.initialDelaySeconds`  | Delay before readiness probe is initiated                                                                                                                                 | 5                                                             |
-| `metrics.readinessProbe.periodSeconds`        | How often to perform the probe                                                                                                                                            | 10                                                            |
-| `metrics.readinessProbe.timeoutSeconds`       | When the probe times out                                                                                                                                                  | 5                                                             |
-| `metrics.readinessProbe.failureThreshold`     | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                                | 6                                                             |
-| `metrics.readinessProbe.successThreshold`     | Minimum consecutive successes for the probe to be considered successful after having failed                                                                               | 1                                                             |
-| `updateStrategy`                              | Update strategy policy                                                                                                                                                    | `{type: "RollingUpdate"}`                                     |
-
-Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
-
-```console
-$ helm install --name my-release \
-  --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
-    stable/postgresql
-```
-
-The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`.
-
-Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
-
-```console
-$ helm install --name my-release -f values.yaml stable/postgresql
-```
-
-> **Tip**: You can use the default [values.yaml](values.yaml)
-
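-For reference, a minimal sketch of such a values file, equivalent to the `--set` example above (the password and database name are placeholders):
-
-```yaml
-postgresqlPassword: secretpassword
-postgresqlDatabase: my-database
-```
-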
-## Configuration and installation details
-
-### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
-
-It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
-
-Bitnami will release a new chart with updated containers whenever a new version of the main container is published, or when significant changes or critical vulnerabilities need to be addressed.
-
-### Production configuration and horizontal scaling
-
-This chart includes a `values-production.yaml` file with parameters oriented towards a production configuration, in contrast to the regular `values.yaml`. You can use this file instead of the default one.
-
-- Enable replication:
-```diff
-- replication.enabled: false
-+ replication.enabled: true
-```
-
-- Number of slaves replicas:
-```diff
-- replication.slaveReplicas: 1
-+ replication.slaveReplicas: 2
-```
-
-- Set synchronous commit mode:
-```diff
-- replication.synchronousCommit: "off"
-+ replication.synchronousCommit: "on"
-```
-
-- Number of replicas that will have synchronous replication:
-```diff
-- replication.numSynchronousReplicas: 0
-+ replication.numSynchronousReplicas: 1
-```
-
-- Start a prometheus exporter:
-```diff
-- metrics.enabled: false
-+ metrics.enabled: true
-```
-
-To horizontally scale this chart, adjust the number of slave nodes in your PostgreSQL deployment via the `replication.slaveReplicas` parameter. You can also use the `values-production.yaml` file or modify the parameters shown above.
-
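-As a sketch, the same production-oriented settings can be collected in a custom values file; the values below simply mirror the diffs shown above:
-
-```yaml
-replication:
-  enabled: true
-  slaveReplicas: 2
-  synchronousCommit: "on"
-  numSynchronousReplicas: 1
-metrics:
-  enabled: true
-```
-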
-### Change PostgreSQL version
-
-To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0-debian-9-r0`
-
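-For instance, the same override expressed as a values file entry (any valid tag from the link above can be used; the tag below is just the example from this section):
-
-```yaml
-image:
-  tag: 12.0.0-debian-9-r0
-```
-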
-### postgresql.conf / pg_hba.conf files as configMap
-
-This Helm chart also supports customizing the whole configuration file.
-
-Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a ConfigMap into the containers and used to configure the PostgreSQL server.
-
-Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict with camelCase keys, e.g. `{"sharedBuffers": "500MB"}`.
-
-In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
-
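-As an illustrative sketch, a couple of parameters set through the `postgresqlConfiguration` dict in a values file (keys are written in camelCase and rendered in snake_case by the chart; `maxConnections` here is only an example key, not a recommendation):
-
-```yaml
-postgresqlConfiguration:
-  sharedBuffers: "500MB"
-  maxConnections: "200"
-```
-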
-### Allow settings to be loaded from files other than the default `postgresql.conf`
-
-If you don't want to provide the whole PostgreSQL configuration file and only want to specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
-Those files will be mounted as a ConfigMap into the containers, adding to or overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
-
-Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
-
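-The chart also exposes a `postgresqlExtendedConf` dict (see `templates/extended-config-configmap.yaml`) for passing extra settings inline; a small sketch, with `logMinDurationStatement` as a purely illustrative key:
-
-```yaml
-postgresqlExtendedConf:
-  logMinDurationStatement: "1000"
-```
-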
-### Initialize a fresh instance
-
-The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
-
-Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
-
-In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
-
-The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
-
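-A minimal sketch of an inline initialization script supplied via the `initdbScripts` dict (the script name and SQL statement are placeholders):
-
-```yaml
-initdbScripts:
-  create_reporting_schema.sql: |
-    CREATE SCHEMA IF NOT EXISTS reporting;
-```
-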
-### Metrics
-
-The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
-
-The exporter allows you to create custom metrics from additional SQL queries. See the chart's `values.yaml` for an example and consult the [exporter's documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
-
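-For example, a sketch that enables the exporter and, when the Prometheus operator is available, a ServiceMonitor (both parameters are listed in the table above):
-
-```yaml
-metrics:
-  enabled: true
-  serviceMonitor:
-    enabled: true
-```
-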
-### Use of global variables
-
-In more complex scenarios, we may have the following tree of dependencies
-
-```
-                     +--------------+
-                     |              |
-        +------------+   Chart 1    +-----------+
-        |            |              |           |
-        |            --------+------+           |
-        |                    |                  |
-        |                    |                  |
-        |                    |                  |
-        |                    |                  |
-        v                    v                  v
-+-------+------+    +--------+------+  +--------+------+
-|              |    |               |  |               |
-|  PostgreSQL  |    |  Sub-chart 1  |  |  Sub-chart 2  |
-|              |    |               |  |               |
-+--------------+    +---------------+  +---------------+
-```
-
-Chart 1 depends on the three charts below it. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters:
-
-```
-postgresql.postgresqlPassword=testtest
-subchart1.postgresql.postgresqlPassword=testtest
-subchart2.postgresql.postgresqlPassword=testtest
-postgresql.postgresqlDatabase=db1
-subchart1.postgresql.postgresqlDatabase=db1
-subchart2.postgresql.postgresqlDatabase=db1
-```
-
-If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
-
-```
-global.postgresql.postgresqlPassword=testtest
-global.postgresql.postgresqlDatabase=db1
-```
-
-This way, the credentials will be available in all of the subcharts.
-
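-Expressed as a values file for Chart 1, the same global credentials would look like this sketch:
-
-```yaml
-global:
-  postgresql:
-    postgresqlPassword: testtest
-    postgresqlDatabase: db1
-```
-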
-## Persistence
-
-The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
-
-Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
-See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
-
-If the volume already contains data, synchronization to the standby nodes will fail for all commits; for the details, refer to the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to keep that data, convert it to SQL and import it after `helm install` has finished.
-
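-As a sketch, persistence can be turned off for throw-away test installs; `persistence.enabled` is assumed here based on the Parameters section referenced above:
-
-```yaml
-persistence:
-  enabled: false
-```
-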
-## NetworkPolicy
-
-To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
-
-For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
-
-```console
-$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
-```
-
-With NetworkPolicy enabled, traffic will be limited to just port 5432.
-
-For a more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
-This label will be displayed in the output of a successful install.
-
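-As a sketch, the corresponding values for a restrictive policy (both parameters appear in this section):
-
-```yaml
-networkPolicy:
-  enabled: true
-  allowExternal: false
-```
-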
-## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
-
-- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
-- The Bitnami PostgreSQL image is non-root by default. This requires running the pod with a `securityContext` and updating the volume permissions with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
-
-### Deploy chart using Docker Official PostgreSQL Image
-
-From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
-Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. The PostgreSQL data directory cannot be the mount point itself; it has to be a subdirectory.
-
-```
-helm install --name postgres \
-             --set image.repository=postgres \
-             --set image.tag=10.6 \
-             --set postgresqlDataDir=/data/pgdata \
-             --set persistence.mountPath=/data/ \
-             stable/postgresql
-```
-
-## Upgrade
-
-It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
-
-```bash
-$ helm upgrade my-release stable/postgresql \
-    --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
-    --set replication.password=[REPLICATION_PASSWORD]
-```
-
-> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes.
-
-## 8.0.0
-
-Prefixes the port names with their protocols to comply with Istio conventions.
-
-If you depend on the port names in your setup, make sure to update them to reflect this change.
-
-## 7.1.0
-
-Adds support for LDAP configuration.
-
-## 7.0.0
-
-Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly; this has since been fixed to match the spec.
-
-In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the deprecation of the older APIs, resulting in compatibility breakage.
-
-This major version bump signifies this change.
-
-## 6.5.7
-
-In this version, the chart uses PostgreSQL with the PostGIS extension included. The version used with PostgreSQL 10, 11 and 12 is PostGIS 2.5. It has been compiled with the following dependencies:
-
- - protobuf
- - protobuf-c
- - json-c
- - geos
- - proj
-
-## 5.0.0
-
-In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
-
-For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades. You may see errors like the following in the logs:
-
-```bash
-Welcome to the Bitnami postgresql container
-Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
-Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
-Send us your feedback at containers@bitnami.com
-
-INFO  ==> ** Starting PostgreSQL setup **
-INFO  ==> Validating settings in POSTGRESQL_* env vars..
-INFO  ==> Initializing PostgreSQL database...
-INFO  ==> postgresql.conf file not detected. Generating it...
-INFO  ==> pg_hba.conf file not detected. Generating it...
-INFO  ==> Deploying PostgreSQL with persisted data...
-INFO  ==> Configuring replication parameters
-INFO  ==> Loading custom scripts...
-INFO  ==> Enabling remote connections
-INFO  ==> Stopping PostgreSQL...
-INFO  ==> ** PostgreSQL setup finished! **
-
-INFO  ==> ** Starting PostgreSQL **
-  [1] FATAL:  database files are incompatible with server
-  [1] DETAIL:  The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
-```
-In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one.
-
-### 4.0.0
-
-By default, this chart uses the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting `POSTGRES_PASSWORD` on the slaves as well, in order to properly configure the `pg_hba.conf` file. Users of previous versions of the chart are advised to upgrade immediately.
-
-IMPORTANT: If you do not want to upgrade the chart version, make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get the following error:
-
-```
-The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
-```
-
-### 3.0.0
-
-This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
-It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`.
-
-#### Breaking changes
-
-- `affinty` has been renamed to `master.affinity` and `slave.affinity`.
-- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
-- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
-
-### 2.0.0
-
-In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
-
- - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in NOTES.txt; the service name can be obtained by running:
-
- ```console
-$ kubectl get svc
- ```
-
-- Install (not upgrade) the new version
-
-```console
-$ helm repo update
-$ helm install --name my-release stable/postgresql
-```
-
-- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
-
-```console
-$ kubectl exec -it NAME bash
-```
-
-- Once logged in, create a dump file from the previous database using `pg_dump`; to do so, connect to the previous PostgreSQL chart:
-
-```console
-$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
-```
-
-After running the above command you will be prompted for a password; this is the password of the previous chart (`OLD_PASSWORD`).
-This operation may take some time depending on the database size.
-
-- Once you have the backup file, you can restore it with a command like the one below:
-
-```console
-$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
-```
-
-In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt).
-
-If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
-
-```console
-$ psql -U postgres
-postgres=# drop database DATABASE_NAME;
-postgres=# create database DATABASE_NAME;
-postgres=# create user USER_NAME;
-postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
-postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
-postgres=# alter database DATABASE_NAME owner to USER_NAME;
-```
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/ci/default-values.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/ci/default-values.yaml
deleted file mode 100755 (executable)
index fc2ba60..0000000
+++ /dev/null
@@ -1 +0,0 @@
-# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/ci/shmvolume-disabled-values.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/ci/shmvolume-disabled-values.yaml
deleted file mode 100755 (executable)
index 347d3b4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-shmVolume:
-  enabled: false
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/files/README.md b/helm/infrastructure/subcharts/kong/charts/postgresql/files/README.md
deleted file mode 100755 (executable)
index 1813a2f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-Copy your postgresql.conf and/or pg_hba.conf files here to use them as a config map.
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/files/conf.d/README.md b/helm/infrastructure/subcharts/kong/charts/postgresql/files/conf.d/README.md
deleted file mode 100755 (executable)
index 184c187..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-If you don't want to provide the whole configuration file and only want to specify certain parameters, you can copy your extended `.conf` files here.
-These files will be injected as a config map, adding to or overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
-
-More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/helm/infrastructure/subcharts/kong/charts/postgresql/files/docker-entrypoint-initdb.d/README.md
deleted file mode 100755 (executable)
index cba3809..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-You can copy your custom `.sh`, `.sql` or `.sql.gz` files here so they are executed during the first boot of the image.
-
-More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/NOTES.txt b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/NOTES.txt
deleted file mode 100755 (executable)
index 3b5e6c6..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-** Please be patient while the chart is being deployed **
-
-PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster:
-
-    {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
-{{- if .Values.replication.enabled }}
-    {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
-{{- end }}
-
-{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
-
-To get the password for "postgres" run:
-
-    export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode)
-{{- end }}
-
-To get the password for "{{ template "postgresql.username" . }}" run:
-
-    export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode)
-
-To connect to your database run the following command:
-
-    kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
-   --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }}
-
-{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
-Note: Since NetworkPolicy is enabled, only pods with label "{{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster.
-{{- end }}
-
-To connect to your database from outside the cluster execute the following commands:
-
-{{- if contains "NodePort" .Values.service.type }}
-
-    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
-    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }})
-    {{ if (include "postgresql.password" . )  }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }}
-
-{{- else if contains "LoadBalancer" .Values.service.type }}
-
-  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}'
-
-    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
-    {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }}
-
-{{- else if contains "ClusterIP" .Values.service.type }}
-
-    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} &
-    {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }}
-
-{{- end }}
-
-{{- include "postgresql.validateValues" . -}}
-
-{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
-
-WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
-+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
-
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/_helpers.tpl b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/_helpers.tpl
deleted file mode 100755 (executable)
index 3ee5572..0000000
+++ /dev/null
@@ -1,420 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "postgresql.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "postgresql.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "postgresql.master.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}}
-{{- if .Values.replication.enabled -}}
-{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for networkpolicy.
-*/}}
-{{- define "postgresql.networkPolicy.apiVersion" -}}
-{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
-"extensions/v1beta1"
-{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
-"networking.k8s.io/v1"
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "postgresql.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Return the proper PostgreSQL image name
-*/}}
-{{- define "postgresql.image" -}}
-{{- $registryName := .Values.image.registry -}}
-{{- $repositoryName := .Values.image.repository -}}
-{{- $tag := .Values.image.tag | toString -}}
-{{/*
-Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
-but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
-Also, we can't use a single if because lazy evaluation is not an option
-*/}}
-{{- if .Values.global }}
-    {{- if .Values.global.imageRegistry }}
-        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
-    {{- else -}}
-        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-    {{- end -}}
-{{- else -}}
-    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL postgres user password
-*/}}
-{{- define "postgresql.postgres.password" -}}
-{{- if .Values.global.postgresql.postgresqlPostgresPassword }}
-    {{- .Values.global.postgresql.postgresqlPostgresPassword -}}
-{{- else if .Values.postgresqlPostgresPassword -}}
-    {{- .Values.postgresqlPostgresPassword -}}
-{{- else -}}
-    {{- randAlphaNum 10 -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL password
-*/}}
-{{- define "postgresql.password" -}}
-{{- if .Values.global.postgresql.postgresqlPassword }}
-    {{- .Values.global.postgresql.postgresqlPassword -}}
-{{- else if .Values.postgresqlPassword -}}
-    {{- .Values.postgresqlPassword -}}
-{{- else -}}
-    {{- randAlphaNum 10 -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL replication password
-*/}}
-{{- define "postgresql.replication.password" -}}
-{{- if .Values.global.postgresql.replicationPassword }}
-    {{- .Values.global.postgresql.replicationPassword -}}
-{{- else if .Values.replication.password -}}
-    {{- .Values.replication.password -}}
-{{- else -}}
-    {{- randAlphaNum 10 -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL username
-*/}}
-{{- define "postgresql.username" -}}
-{{- if .Values.global.postgresql.postgresqlUsername }}
-    {{- .Values.global.postgresql.postgresqlUsername -}}
-{{- else -}}
-    {{- .Values.postgresqlUsername -}}
-{{- end -}}
-{{- end -}}
-
-
-{{/*
-Return PostgreSQL replication username
-*/}}
-{{- define "postgresql.replication.username" -}}
-{{- if .Values.global.postgresql.replicationUser }}
-    {{- .Values.global.postgresql.replicationUser -}}
-{{- else -}}
-    {{- .Values.replication.user -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL port
-*/}}
-{{- define "postgresql.port" -}}
-{{- if .Values.global.postgresql.servicePort }}
-    {{- .Values.global.postgresql.servicePort -}}
-{{- else -}}
-    {{- .Values.service.port -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return PostgreSQL created database
-*/}}
-{{- define "postgresql.database" -}}
-{{- if .Values.global.postgresql.postgresqlDatabase }}
-    {{- .Values.global.postgresql.postgresqlDatabase -}}
-{{- else if .Values.postgresqlDatabase -}}
-    {{- .Values.postgresqlDatabase -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the proper image name to change the volume permissions
-*/}}
-{{- define "postgresql.volumePermissions.image" -}}
-{{- $registryName := .Values.volumePermissions.image.registry -}}
-{{- $repositoryName := .Values.volumePermissions.image.repository -}}
-{{- $tag := .Values.volumePermissions.image.tag | toString -}}
-{{/*
-Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
-but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
-Also, we can't use a single if because lazy evaluation is not an option
-*/}}
-{{- if .Values.global }}
-    {{- if .Values.global.imageRegistry }}
-        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
-    {{- else -}}
-        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-    {{- end -}}
-{{- else -}}
-    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the proper PostgreSQL metrics image name
-*/}}
-{{- define "postgresql.metrics.image" -}}
-{{- $registryName :=  default "docker.io" .Values.metrics.image.registry -}}
-{{- $repositoryName := .Values.metrics.image.repository -}}
-{{- $tag := default "latest" .Values.metrics.image.tag | toString -}}
-{{/*
-Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
-but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
-Also, we can't use a single if because lazy evaluation is not an option
-*/}}
-{{- if .Values.global }}
-    {{- if .Values.global.imageRegistry }}
-        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
-    {{- else -}}
-        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-    {{- end -}}
-{{- else -}}
-    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the password secret.
-*/}}
-{{- define "postgresql.secretName" -}}
-{{- if .Values.global.postgresql.existingSecret }}
-    {{- printf "%s" .Values.global.postgresql.existingSecret -}}
-{{- else if .Values.existingSecret -}}
-    {{- printf "%s" .Values.existingSecret -}}
-{{- else -}}
-    {{- printf "%s" (include "postgresql.fullname" .) -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return true if a secret object should be created
-*/}}
-{{- define "postgresql.createSecret" -}}
-{{- if .Values.global.postgresql.existingSecret }}
-{{- else if .Values.existingSecret -}}
-{{- else -}}
-    {{- true -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the configuration ConfigMap name.
-*/}}
-{{- define "postgresql.configurationCM" -}}
-{{- if .Values.configurationConfigMap -}}
-{{- printf "%s" (tpl .Values.configurationConfigMap $) -}}
-{{- else -}}
-{{- printf "%s-configuration" (include "postgresql.fullname" .) -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the extended configuration ConfigMap name.
-*/}}
-{{- define "postgresql.extendedConfigurationCM" -}}
-{{- if .Values.extendedConfConfigMap -}}
-{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}}
-{{- else -}}
-{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the initialization scripts ConfigMap name.
-*/}}
-{{- define "postgresql.initdbScriptsCM" -}}
-{{- if .Values.initdbScriptsConfigMap -}}
-{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}}
-{{- else -}}
-{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the initialization scripts Secret name.
-*/}}
-{{- define "postgresql.initdbScriptsSecret" -}}
-{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}}
-{{- end -}}
-
-{{/*
-Get the metrics ConfigMap name.
-*/}}
-{{- define "postgresql.metricsCM" -}}
-{{- printf "%s-metrics" (include "postgresql.fullname" .) -}}
-{{- end -}}
-
-{{/*
-Return the proper Docker Image Registry Secret Names
-*/}}
-{{- define "postgresql.imagePullSecrets" -}}
-{{/*
-Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
-but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
-Also, we can not use a single if because lazy evaluation is not an option
-*/}}
-{{- if .Values.global }}
-{{- if .Values.global.imagePullSecrets }}
-imagePullSecrets:
-{{- range .Values.global.imagePullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
-imagePullSecrets:
-{{- range .Values.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- range .Values.metrics.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- range .Values.volumePermissions.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- end -}}
-{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
-imagePullSecrets:
-{{- range .Values.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- range .Values.metrics.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- range .Values.volumePermissions.image.pullSecrets }}
-  - name: {{ . }}
-{{- end }}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Get the readiness probe command
-*/}}
-{{- define "postgresql.readinessProbeCommand" -}}
-- |
-{{- if (include "postgresql.database" .) }}
-  exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-{{- else }}
-  exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-{{- end }}
-{{- if contains "bitnami/" .Values.image.repository }}
-  [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return  the proper Storage Class
-*/}}
-{{- define "postgresql.storageClass" -}}
-{{/*
-Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
-but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
-*/}}
-{{- if .Values.global -}}
-    {{- if .Values.global.storageClass -}}
-        {{- if (eq "-" .Values.global.storageClass) -}}
-            {{- printf "storageClassName: \"\"" -}}
-        {{- else }}
-            {{- printf "storageClassName: %s" .Values.global.storageClass -}}
-        {{- end -}}
-    {{- else -}}
-        {{- if .Values.persistence.storageClass -}}
-              {{- if (eq "-" .Values.persistence.storageClass) -}}
-                  {{- printf "storageClassName: \"\"" -}}
-              {{- else }}
-                  {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
-              {{- end -}}
-        {{- end -}}
-    {{- end -}}
-{{- else -}}
-    {{- if .Values.persistence.storageClass -}}
-        {{- if (eq "-" .Values.persistence.storageClass) -}}
-            {{- printf "storageClassName: \"\"" -}}
-        {{- else }}
-            {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
-        {{- end -}}
-    {{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Renders a value that contains template.
-Usage:
-{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
-*/}}
-{{- define "postgresql.tplValue" -}}
-    {{- if typeIs "string" .value }}
-        {{- tpl .value .context }}
-    {{- else }}
-        {{- tpl (.value | toYaml) .context }}
-    {{- end }}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for statefulset.
-*/}}
-{{- define "postgresql.statefulset.apiVersion" -}}
-{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "apps/v1beta2" -}}
-{{- else -}}
-{{- print "apps/v1" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Compile all warnings into a single message, and call fail.
-*/}}
-{{- define "postgresql.validateValues" -}}
-{{- $messages := list -}}
-{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}}
-{{- $messages := without $messages "" -}}
-{{- $message := join "\n" $messages -}}
-
-{{- if $message -}}
-{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap
-*/}}
-{{- define "postgresql.validateValues.ldapConfigurationMethod" -}}
-{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}
-postgresql: ldap.url, ldap.server
-    You cannot set both `ldap.url` and `ldap.server` at the same time.
-    Please provide a unique way to configure LDAP.
-    More info at https://www.postgresql.org/docs/current/auth-ldap.html
-{{- end -}}
-{{- end -}}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/configmap.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/configmap.yaml
deleted file mode 100755 (executable)
index d2178c0..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "postgresql.fullname" . }}-configuration
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-data:
-{{- if (.Files.Glob "files/postgresql.conf") }}
-{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }}
-{{- else if .Values.postgresqlConfiguration }}
-  postgresql.conf: |
-{{- range $key, $value := default dict .Values.postgresqlConfiguration }}
-    {{ $key | snakecase }}={{ $value }}
-{{- end }}
-{{- end }}
-{{- if (.Files.Glob "files/pg_hba.conf") }}
-{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }}
-{{- else if .Values.pgHbaConfiguration }}
-  pg_hba.conf: |
-{{ .Values.pgHbaConfiguration | indent 4 }}
-{{- end }}
-{{ end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/extended-config-configmap.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/extended-config-configmap.yaml
deleted file mode 100755 (executable)
index 8a41195..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "postgresql.fullname" . }}-extended-configuration
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-data:
-{{- with .Files.Glob "files/conf.d/*.conf" }}
-{{ .AsConfig | indent 2 }}
-{{- end }}
-{{ with .Values.postgresqlExtendedConf }}
-  override.conf: |
-{{- range $key, $value := . }}
-    {{ $key | snakecase }}={{ $value }}
-{{- end }}
-{{- end }}
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/initialization-configmap.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/initialization-configmap.yaml
deleted file mode 100755 (executable)
index 8eb5e05..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "postgresql.fullname" . }}-init-scripts
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }}
-binaryData:
-{{- range $path, $bytes := . }}
-  {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }}
-{{- end }}
-{{- end }}
-data:
-{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }}
-{{ .AsConfig | indent 2 }}
-{{- end }}
-{{- with .Values.initdbScripts }}
-{{ toYaml . | indent 2 }}
-{{- end }}
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-configmap.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-configmap.yaml
deleted file mode 100755 (executable)
index 524aa2f..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "postgresql.metricsCM" . }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-data:
-  custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-svc.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/metrics-svc.yaml
deleted file mode 100755 (executable)
index c610f09..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-{{- if .Values.metrics.enabled }}
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ template "postgresql.fullname" . }}-metrics
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-  annotations:
-{{ toYaml .Values.metrics.service.annotations | indent 4 }}
-spec:
-  type: {{ .Values.metrics.service.type }}
-  {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}
-  loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}
-  {{- end }}
-  ports:
-    - name: http-metrics
-      port: 9187
-      targetPort: http-metrics
-  selector:
-    app: {{ template "postgresql.name" . }}
-    release: {{ .Release.Name }}
-    role: master
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/networkpolicy.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/networkpolicy.yaml
deleted file mode 100755 (executable)
index ea1fc9b..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-{{- if .Values.networkPolicy.enabled }}
-kind: NetworkPolicy
-apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }}
-metadata:
-  name: {{ template "postgresql.fullname" . }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-spec:
-  podSelector:
-    matchLabels:
-      app: {{ template "postgresql.name" . }}
-      release: {{ .Release.Name | quote }}
-  ingress:
-    # Allow inbound connections
-    - ports:
-        - port: {{ template "postgresql.port" . }}
-      {{- if not .Values.networkPolicy.allowExternal }}
-      from:
-        - podSelector:
-            matchLabels:
-              {{ template "postgresql.fullname" . }}-client: "true"
-          {{- if .Values.networkPolicy.explicitNamespacesSelector }}
-          namespaceSelector:
-{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }}
-          {{- end }}
-        - podSelector:
-            matchLabels:
-              app: {{ template "postgresql.name" . }}
-              release: {{ .Release.Name | quote }}
-              role: slave
-      {{- end }}
-    # Allow prometheus scrapes
-    - ports:
-        - port: 9187
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/prometheusrule.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/prometheusrule.yaml
deleted file mode 100755 (executable)
index 44f1242..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: PrometheusRule
-metadata:
-  name: {{ template "postgresql.fullname" . }}
-{{- with .Values.metrics.prometheusRule.namespace }}
-  namespace: {{ . }}
-{{- end }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Values.metrics.prometheusRule.additionalLabels }}
-{{ toYaml . | indent 4 }}
-{{- end }}
-spec:
-{{- with .Values.metrics.prometheusRule.rules }}
-  groups:
-    - name: {{ template "postgresql.name" $ }}
-      rules: {{ tpl (toYaml .) $ | nindent 8 }}
-{{- end }}
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/secrets.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/secrets.yaml
deleted file mode 100755 (executable)
index 094d18b..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-{{- if (include "postgresql.createSecret" .) }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ template "postgresql.fullname" . }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-type: Opaque
-data:
-  {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
-  postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }}
-  {{- end }}
-  postgresql-password: {{ include "postgresql.password" . | b64enc | quote }}
-  {{- if .Values.replication.enabled }}
-  postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }}
-  {{- end }}
-  {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}}
-  postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }}
-  {{- end }}
-{{- end -}}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/serviceaccount.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/serviceaccount.yaml
deleted file mode 100755 (executable)
index 27e5b51..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-  name: {{ template "postgresql.fullname" . }}
-{{- end }}
\ No newline at end of file
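
The ServiceAccount above is created only when serviceAccount.enabled is true and no serviceAccount.name is given; both StatefulSets then fall back to the chart fullname. For example:

    serviceAccount:
      enabled: true
      # name: my-existing-sa    # hypothetical; setting a name skips the template above
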
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/servicemonitor.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/servicemonitor.yaml
deleted file mode 100755 (executable)
index f3a529a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: {{ include "postgresql.fullname" . }}
-  {{- if .Values.metrics.serviceMonitor.namespace }}
-  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
-  {{- end }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-    {{- if .Values.metrics.serviceMonitor.additionalLabels }}
-{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
-    {{- end }}
-spec:
-  endpoints:
-    - port: http-metrics
-      {{- if .Values.metrics.serviceMonitor.interval }}
-      interval: {{ .Values.metrics.serviceMonitor.interval }}
-      {{- end }}
-      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
-      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
-      {{- end }}
-  namespaceSelector:
-    matchNames:
-      - {{ .Release.Namespace }}
-  selector:
-    matchLabels:
-      app: {{ template "postgresql.name" . }}
-      release: {{ .Release.Name }}
-{{- end }}
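
The ServiceMonitor above scrapes the "http-metrics" port and is gated on both metrics.enabled and metrics.serviceMonitor.enabled. A values sketch mirroring the commented defaults in the chart's values files; the "release: prometheus" label is again an assumption:

    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        interval: 30s              # optional scrape interval
        scrapeTimeout: 10s         # optional per-scrape timeout
        additionalLabels:
          release: prometheus      # illustrative; match your Prometheus Operator selector
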
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset-slaves.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset-slaves.yaml
deleted file mode 100755 (executable)
index 3506199..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-{{- if .Values.replication.enabled }}
-apiVersion: {{ template "postgresql.statefulset.apiVersion" . }}
-kind: StatefulSet
-metadata:
-  name: "{{ template "postgresql.fullname" . }}-slave"
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Values.slave.labels }}
-{{ toYaml . | indent 4 }}
-{{- end }}
-{{- with .Values.slave.annotations }}
-  annotations:
-{{ toYaml . | indent 4 }}
-{{- end }}
-spec:
-  serviceName: {{ template "postgresql.fullname" . }}-headless
-  replicas: {{ .Values.replication.slaveReplicas }}
-  selector:
-    matchLabels:
-      app: {{ template "postgresql.name" . }}
-      release: {{ .Release.Name | quote }}
-      role: slave
-  template:
-    metadata:
-      name: {{ template "postgresql.fullname" . }}
-      labels:
-        app: {{ template "postgresql.name" . }}
-        chart: {{ template "postgresql.chart" . }}
-        release: {{ .Release.Name | quote }}
-        heritage: {{ .Release.Service | quote }}
-        role: slave
-{{- with .Values.slave.podLabels }}
-{{ toYaml . | indent 8 }}
-{{- end }}
-{{- with .Values.slave.podAnnotations }}
-      annotations:
-{{ toYaml . | indent 8 }}
-{{- end }}
-    spec:
-      {{- if .Values.schedulerName }}
-      schedulerName: "{{ .Values.schedulerName }}"
-      {{- end }}
-{{- include "postgresql.imagePullSecrets" . | indent 6 }}
-      {{- if .Values.slave.nodeSelector }}
-      nodeSelector:
-{{ toYaml .Values.slave.nodeSelector | indent 8 }}
-      {{- end }}
-      {{- if .Values.slave.affinity }}
-      affinity:
-{{ toYaml .Values.slave.affinity | indent 8 }}
-      {{- end }}
-      {{- if .Values.slave.tolerations }}
-      tolerations:
-{{ toYaml .Values.slave.tolerations | indent 8 }}
-      {{- end }}
-      {{- if .Values.terminationGracePeriodSeconds }}
-      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
-      {{- end }}
-      {{- if .Values.securityContext.enabled }}
-      securityContext:
-        fsGroup: {{ .Values.securityContext.fsGroup }}
-      {{- end }}
-      {{- if .Values.serviceAccount.enabled }}
-      serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}}
-      {{- end }}
-      {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.shmVolume.enabled }}
-      initContainers:
-      {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.shmVolume.enabled }}
-        - name: init-chmod-data
-          image: {{ template "postgresql.volumePermissions.image" . }}
-          imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
-          {{- if .Values.resources }}
-          resources: {{- toYaml .Values.resources | nindent 12 }}
-          {{- end }}
-          command:
-            - /bin/sh
-            - -c
-            - |
-              mkdir -p {{ .Values.persistence.mountPath }}/data
-              chmod 700 {{ .Values.persistence.mountPath }}/data
-              find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
-                xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
-              {{- if .Values.shmVolume.enabled }}
-              chmod -R 777 /dev/shm
-              {{- end }}
-          securityContext:
-            runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
-          volumeMounts:
-            - name: data
-              mountPath: {{ .Values.persistence.mountPath }}
-              subPath: {{ .Values.persistence.subPath }}
-            {{- if .Values.shmVolume.enabled }}
-            - name: dshm
-              mountPath: /dev/shm
-            {{- end }}
-      {{- end }}
-      {{- if .Values.slave.extraInitContainers }}
-{{ tpl .Values.slave.extraInitContainers . | indent 8 }}
-      {{- end }}
-      {{- end }}
-      {{- if .Values.slave.priorityClassName }}
-      priorityClassName: {{ .Values.slave.priorityClassName }}
-      {{- end }}
-      containers:
-        - name: {{ template "postgresql.fullname" . }}
-          image: {{ template "postgresql.image" . }}
-          imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-          {{- if .Values.resources }}
-          resources: {{- toYaml .Values.resources | nindent 12 }}
-          {{- end }}
-          {{- if .Values.securityContext.enabled }}
-          securityContext:
-            runAsUser: {{ .Values.securityContext.runAsUser }}
-          {{- end }}
-          env:
-            - name: BITNAMI_DEBUG
-              value: {{ ternary "true" "false" .Values.image.debug | quote }}
-            - name: POSTGRESQL_VOLUME_DIR
-              value: "{{ .Values.persistence.mountPath }}"
-            - name: POSTGRESQL_PORT_NUMBER
-              value: "{{ template "postgresql.port" . }}"
-            {{- if .Values.persistence.mountPath }}
-            - name: PGDATA
-              value: {{ .Values.postgresqlDataDir | quote }}
-            {{- end }}
-            - name: POSTGRES_REPLICATION_MODE
-              value: "slave"
-            - name: POSTGRES_REPLICATION_USER
-              value: {{ include "postgresql.replication.username" . | quote }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_REPLICATION_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
-            {{- else }}
-            - name: POSTGRES_REPLICATION_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-replication-password
-            {{- end }}
-            - name: POSTGRES_CLUSTER_APP_NAME
-              value: {{ .Values.replication.applicationName }}
-            - name: POSTGRES_MASTER_HOST
-              value: {{ template "postgresql.fullname" . }}
-            - name: POSTGRES_MASTER_PORT_NUMBER
-              value: {{ include "postgresql.port" . | quote }}
-            {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_POSTGRES_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password"
-            {{- else }}
-            - name: POSTGRES_POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-postgres-password
-            {{- end }}
-            {{- end }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-password"
-            {{- else }}
-            - name: POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-password
-            {{- end }}
-          ports:
-            - name: tcp-postgresql
-              containerPort: {{ template "postgresql.port" . }}
-          {{- if .Values.livenessProbe.enabled }}
-          livenessProbe:
-            exec:
-              command:
-                - /bin/sh
-                - -c
-                {{- if (include "postgresql.database" .) }}
-                - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-                {{- else }}
-                - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-                {{- end }}
-            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.livenessProbe.successThreshold }}
-            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
-          {{- end }}
-          {{- if .Values.readinessProbe.enabled }}
-          readinessProbe:
-            exec:
-              command:
-                - /bin/sh
-                - -c
-                - -e
-                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
-            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.readinessProbe.successThreshold }}
-            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
-          {{- end }}
-          volumeMounts:
-            {{- if .Values.usePasswordFile }}
-            - name: postgresql-password
-              mountPath: /opt/bitnami/postgresql/secrets/
-            {{- end }}
-            {{- if .Values.shmVolume.enabled }}
-            - name: dshm
-              mountPath: /dev/shm
-            {{- end }}
-            {{- if .Values.persistence.enabled }}
-            - name: data
-              mountPath: {{ .Values.persistence.mountPath }}
-              subPath: {{ .Values.persistence.subPath }}
-            {{ end }}
-            {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
-            - name: postgresql-extended-config
-              mountPath: /bitnami/postgresql/conf/conf.d/
-            {{- end }}
-            {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
-            - name: postgresql-config
-              mountPath: /bitnami/postgresql/conf
-            {{- end }}
-            {{- if .Values.slave.extraVolumeMounts }}
-            {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }}
-            {{- end }}
-      volumes:
-        {{- if .Values.usePasswordFile }}
-        - name: postgresql-password
-          secret:
-            secretName: {{ template "postgresql.secretName" . }}
-        {{- end }}
-        {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
-        - name: postgresql-config
-          configMap:
-            name: {{ template "postgresql.configurationCM" . }}
-        {{- end }}
-        {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
-        - name: postgresql-extended-config
-          configMap:
-            name: {{ template "postgresql.extendedConfigurationCM" . }}
-        {{- end }}
-        {{- if .Values.shmVolume.enabled }}
-        - name: dshm
-          emptyDir:
-            medium: Memory
-            sizeLimit: 1Gi
-        {{- end }}
-        {{- if not .Values.persistence.enabled }}
-        - name: data
-          emptyDir: {}
-        {{- end }}
-        {{- if .Values.slave.extraVolumes }}
-        {{- toYaml .Values.slave.extraVolumes | nindent 8 }}
-        {{- end }}
-  updateStrategy:
-    type: {{ .Values.updateStrategy.type }}
-    {{- if (eq "Recreate" .Values.updateStrategy.type) }}
-    rollingUpdate: null
-    {{- end }}
-{{- if .Values.persistence.enabled }}
-  volumeClaimTemplates:
-    - metadata:
-        name: data
-      {{- with .Values.persistence.annotations }}
-        annotations:
-        {{- range $key, $value := . }}
-          {{ $key }}: {{ $value }}
-        {{- end }}
-      {{- end }}
-      spec:
-        accessModes:
-        {{- range .Values.persistence.accessModes }}
-          - {{ . | quote }}
-        {{- end }}
-        resources:
-          requests:
-            storage: {{ .Values.persistence.size | quote }}
-        {{ include "postgresql.storageClass" . }}
-{{- end }}
-{{- end }}
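
The slave StatefulSet above renders only with replication.enabled, scales via replication.slaveReplicas, and reuses the master's image, probes and persistence settings while forcing POSTGRES_REPLICATION_MODE=slave. The corresponding block from values-production.yaml:

    replication:
      enabled: true
      user: repl_user
      password: repl_password
      slaveReplicas: 2
      synchronousCommit: "on"          # on, off, remote_apply, remote_write or local
      numSynchronousReplicas: 1        # must not exceed slaveReplicas
      applicationName: my_application
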
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/statefulset.yaml
deleted file mode 100755 (executable)
index 7b1b4cf..0000000
+++ /dev/null
@@ -1,438 +0,0 @@
-apiVersion: {{ template "postgresql.statefulset.apiVersion" . }}
-kind: StatefulSet
-metadata:
-  name: {{ template "postgresql.master.fullname" . }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Values.master.labels }}
-{{ toYaml . | indent 4 }}
-{{- end }}
-{{- with .Values.master.annotations }}
-  annotations:
-{{ toYaml . | indent 4 }}
-{{- end }}
-spec:
-  serviceName: {{ template "postgresql.fullname" . }}-headless
-  replicas: 1
-  updateStrategy:
-    type: {{ .Values.updateStrategy.type }}
-    {{- if (eq "Recreate" .Values.updateStrategy.type) }}
-    rollingUpdate: null
-    {{- end }}
-  selector:
-    matchLabels:
-      app: {{ template "postgresql.name" . }}
-      release: {{ .Release.Name | quote }}
-      role: master
-  template:
-    metadata:
-      name: {{ template "postgresql.fullname" . }}
-      labels:
-        app: {{ template "postgresql.name" . }}
-        chart: {{ template "postgresql.chart" . }}
-        release: {{ .Release.Name | quote }}
-        heritage: {{ .Release.Service | quote }}
-        role: master
-{{- with .Values.master.podLabels }}
-{{ toYaml . | indent 8 }}
-{{- end }}
-{{- with .Values.master.podAnnotations }}
-      annotations:
-{{ toYaml . | indent 8 }}
-{{- end }}
-    spec:
-      {{- if .Values.schedulerName }}
-      schedulerName: "{{ .Values.schedulerName }}"
-      {{- end }}
-{{- include "postgresql.imagePullSecrets" . | indent 6 }}
-      {{- if .Values.master.nodeSelector }}
-      nodeSelector:
-{{ toYaml .Values.master.nodeSelector | indent 8 }}
-      {{- end }}
-      {{- if .Values.master.affinity }}
-      affinity:
-{{ toYaml .Values.master.affinity | indent 8 }}
-      {{- end }}
-      {{- if .Values.master.tolerations }}
-      tolerations:
-{{ toYaml .Values.master.tolerations | indent 8 }}
-      {{- end }}
-      {{- if .Values.terminationGracePeriodSeconds }}
-      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
-      {{- end }}
-      {{- if .Values.securityContext.enabled }}
-      securityContext:
-        fsGroup: {{ .Values.securityContext.fsGroup }}
-      {{- end }}
-      {{- if .Values.serviceAccount.enabled }}
-      serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }}
-      {{- end }}
-      {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.shmVolume.enabled }}
-      initContainers:
-      {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.shmVolume.enabled }}
-        - name: init-chmod-data
-          image: {{ template "postgresql.volumePermissions.image" . }}
-          imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
-          {{- if .Values.resources }}
-          resources: {{- toYaml .Values.resources | nindent 12 }}
-          {{- end }}
-          command:
-            - /bin/sh
-            - -c
-            - |
-              mkdir -p {{ .Values.persistence.mountPath }}/data
-              chmod 700 {{ .Values.persistence.mountPath }}/data
-              find {{ .Values.persistence.mountPath }} -mindepth 0 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \
-                xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
-              {{- if .Values.shmVolume.enabled }}
-              chmod -R 777 /dev/shm
-              {{- end }}
-          securityContext:
-            runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
-          volumeMounts:
-            - name: data
-              mountPath: {{ .Values.persistence.mountPath }}
-              subPath: {{ .Values.persistence.subPath }}
-            {{- if .Values.shmVolume.enabled }}
-            - name: dshm
-              mountPath: /dev/shm
-            {{- end }}
-      {{- end }}
-      {{- if .Values.master.extraInitContainers }}
-{{ tpl .Values.master.extraInitContainers . | indent 8 }}
-      {{- end }}
-      {{- end }}
-      {{- if .Values.master.priorityClassName }}
-      priorityClassName: {{ .Values.master.priorityClassName }}
-      {{- end }}
-      containers:
-        - name: {{ template "postgresql.fullname" . }}
-          image: {{ template "postgresql.image" . }}
-          imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-          {{- if .Values.resources }}
-          resources: {{- toYaml .Values.resources | nindent 12 }}
-          {{- end }}
-          {{- if .Values.securityContext.enabled }}
-          securityContext:
-            runAsUser: {{ .Values.securityContext.runAsUser }}
-          {{- end }}
-          env:
-            - name: BITNAMI_DEBUG
-              value: {{ ternary "true" "false" .Values.image.debug | quote }}
-            - name: POSTGRESQL_PORT_NUMBER
-              value: "{{ template "postgresql.port" . }}"
-            - name: POSTGRESQL_VOLUME_DIR
-              value: "{{ .Values.persistence.mountPath }}"
-            {{- if .Values.postgresqlInitdbArgs }}
-            - name: POSTGRES_INITDB_ARGS
-              value: {{ .Values.postgresqlInitdbArgs | quote }}
-            {{- end }}
-            {{- if .Values.postgresqlInitdbWalDir }}
-            - name: POSTGRES_INITDB_WALDIR
-              value: {{ .Values.postgresqlInitdbWalDir | quote }}
-            {{- end }}
-            {{- if .Values.initdbUser }}
-            - name: POSTGRESQL_INITSCRIPTS_USERNAME
-              value: {{ .Values.initdbUser }}
-            {{- end }}
-            {{- if .Values.initdbPassword }}
-            - name: POSTGRESQL_INITSCRIPTS_PASSWORD
-              value: {{ .Values.initdbPassword | quote }}
-            {{- end }}
-            {{- if .Values.persistence.mountPath }}
-            - name: PGDATA
-              value: {{ .Values.postgresqlDataDir | quote }}
-            {{- end }}
-            {{- if .Values.replication.enabled }}
-            - name: POSTGRES_REPLICATION_MODE
-              value: "master"
-            - name: POSTGRES_REPLICATION_USER
-              value: {{ include "postgresql.replication.username" . | quote }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_REPLICATION_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
-            {{- else }}
-            - name: POSTGRES_REPLICATION_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-replication-password
-            {{- end }}
-            {{- if not (eq .Values.replication.synchronousCommit "off")}}
-            - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE
-              value: {{ .Values.replication.synchronousCommit | quote }}
-            - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS
-              value: {{ .Values.replication.numSynchronousReplicas | quote }}
-            {{- end }}
-            - name: POSTGRES_CLUSTER_APP_NAME
-              value: {{ .Values.replication.applicationName }}
-            {{- end }}
-            {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_POSTGRES_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password"
-            {{- else }}
-            - name: POSTGRES_POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-postgres-password
-            {{- end }}
-            {{- end }}
-            - name: POSTGRES_USER
-              value: {{ include "postgresql.username" . | quote }}
-            {{- if .Values.usePasswordFile }}
-            - name: POSTGRES_PASSWORD_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-password"
-            {{- else }}
-            - name: POSTGRES_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-password
-            {{- end }}
-            {{- if (include "postgresql.database" .) }}
-            - name: POSTGRES_DB
-              value: {{ (include "postgresql.database" .) | quote }}
-            {{- end }}
-            {{- if .Values.extraEnv }}
-            {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }}
-            {{- end }}
-            - name: POSTGRESQL_ENABLE_LDAP
-              value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }}
-            {{- if .Values.ldap.enabled }}
-            - name: POSTGRESQL_LDAP_SERVER
-              value: {{ .Values.ldap.server }}
-            - name: POSTGRESQL_LDAP_PORT
-              value: {{ .Values.ldap.port | quote }}
-            - name: POSTGRESQL_LDAP_SCHEME
-              value: {{ .Values.ldap.scheme }}
-            {{- if .Values.ldap.tls }}
-            - name: POSTGRESQL_LDAP_TLS
-              value: "1"
-            {{- end}}
-            - name: POSTGRESQL_LDAP_PREFIX
-              value: {{ .Values.ldap.prefix | quote }}
-            - name: POSTGRESQL_LDAP_SUFFIX
-              value: {{ .Values.ldap.suffix | quote}}
-            - name: POSTGRESQL_LDAP_BASE_DN
-              value: {{ .Values.ldap.baseDN }}
-            - name: POSTGRESQL_LDAP_BIND_DN
-              value: {{ .Values.ldap.bindDN }}
-            {{- if (not (empty .Values.ldap.bind_password)) }}
-            - name: POSTGRESQL_LDAP_BIND_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-ldap-password
-            {{- end}}
-            - name: POSTGRESQL_LDAP_SEARCH_ATTR
-              value: {{ .Values.ldap.search_attr }}
-            - name: POSTGRESQL_LDAP_SEARCH_FILTER
-              value: {{ .Values.ldap.search_filter }}
-            - name: POSTGRESQL_LDAP_URL
-              value: {{ .Values.ldap.url }}
-            {{- end}}
-          ports:
-            - name: tcp-postgresql
-              containerPort: {{ template "postgresql.port" . }}
-          {{- if .Values.livenessProbe.enabled }}
-          livenessProbe:
-            exec:
-              command:
-                - /bin/sh
-                - -c
-                {{- if (include "postgresql.database" .) }}
-                - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-                {{- else }}
-                - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }}
-                {{- end }}
-            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.livenessProbe.successThreshold }}
-            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
-          {{- end }}
-          {{- if .Values.readinessProbe.enabled }}
-          readinessProbe:
-            exec:
-              command:
-                - /bin/sh
-                - -c
-                - -e
-                {{- include "postgresql.readinessProbeCommand" . | nindent 16 }}
-            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.readinessProbe.successThreshold }}
-            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
-          {{- end }}
-          volumeMounts:
-            {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
-            - name: custom-init-scripts
-              mountPath: /docker-entrypoint-initdb.d/
-            {{- end }}
-            {{- if .Values.initdbScriptsSecret }}
-            - name: custom-init-scripts-secret
-              mountPath: /docker-entrypoint-initdb.d/secret
-            {{- end }}
-            {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
-            - name: postgresql-extended-config
-              mountPath: /bitnami/postgresql/conf/conf.d/
-            {{- end }}
-            {{- if .Values.usePasswordFile }}
-            - name: postgresql-password
-              mountPath: /opt/bitnami/postgresql/secrets/
-            {{- end }}
-            {{- if .Values.shmVolume.enabled }}
-            - name: dshm
-              mountPath: /dev/shm
-            {{- end }}
-            {{- if .Values.persistence.enabled }}
-            - name: data
-              mountPath: {{ .Values.persistence.mountPath }}
-              subPath: {{ .Values.persistence.subPath }}
-            {{- end }}
-            {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
-            - name: postgresql-config
-              mountPath: /bitnami/postgresql/conf
-            {{- end }}
-            {{- if .Values.master.extraVolumeMounts }}
-            {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }}
-            {{- end }}
-{{- if .Values.metrics.enabled }}
-        - name: metrics
-          image: {{ template "postgresql.metrics.image" . }}
-          imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
-         {{- if .Values.metrics.securityContext.enabled }}
-          securityContext:
-            runAsUser: {{ .Values.metrics.securityContext.runAsUser }}
-        {{- end }}
-          env:
-            {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }}
-            - name: DATA_SOURCE_URI
-              value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }}
-            {{- if .Values.usePasswordFile }}
-            - name: DATA_SOURCE_PASS_FILE
-              value: "/opt/bitnami/postgresql/secrets/postgresql-password"
-            {{- else }}
-            - name: DATA_SOURCE_PASS
-              valueFrom:
-                secretKeyRef:
-                  name: {{ template "postgresql.secretName" . }}
-                  key: postgresql-password
-            {{- end }}
-            - name: DATA_SOURCE_USER
-              value: {{ template "postgresql.username" . }}
-          {{- if .Values.livenessProbe.enabled }}
-          livenessProbe:
-            httpGet:
-              path: /
-              port: http-metrics
-            initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
-            failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
-          {{- end }}
-          {{- if .Values.readinessProbe.enabled }}
-          readinessProbe:
-            httpGet:
-              path: /
-              port: http-metrics
-            initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
-            failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
-          {{- end }}
-          volumeMounts:
-            {{- if .Values.usePasswordFile }}
-            - name: postgresql-password
-              mountPath: /opt/bitnami/postgresql/secrets/
-            {{- end }}
-            {{- if .Values.metrics.customMetrics }}
-            - name: custom-metrics
-              mountPath: /conf
-              readOnly: true
-          args: ["--extend.query-path", "/conf/custom-metrics.yaml"]
-            {{- end }}
-          ports:
-            - name: http-metrics
-              containerPort: 9187
-          {{- if .Values.metrics.resources }}
-          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
-          {{- end }}
-{{- end }}
-      volumes:
-        {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
-        - name: postgresql-config
-          configMap:
-            name: {{ template "postgresql.configurationCM" . }}
-        {{- end }}
-        {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
-        - name: postgresql-extended-config
-          configMap:
-            name: {{ template "postgresql.extendedConfigurationCM" . }}
-        {{- end }}
-        {{- if .Values.usePasswordFile }}
-        - name: postgresql-password
-          secret:
-            secretName: {{ template "postgresql.secretName" . }}
-        {{- end }}
-        {{- if  or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
-        - name: custom-init-scripts
-          configMap:
-            name: {{ template "postgresql.initdbScriptsCM" . }}
-        {{- end }}
-        {{- if .Values.initdbScriptsSecret }}
-        - name: custom-init-scripts-secret
-          secret:
-            secretName: {{ template "postgresql.initdbScriptsSecret" . }}
-        {{- end }}
-        {{- if .Values.master.extraVolumes }}
-        {{- toYaml .Values.master.extraVolumes | nindent 8 }}
-        {{- end }}
-        {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}
-        - name: custom-metrics
-          configMap:
-            name: {{ template "postgresql.metricsCM" . }}
-        {{- end }}
-        {{- if .Values.shmVolume.enabled }}
-        - name: dshm
-          emptyDir:
-            medium: Memory
-            sizeLimit: 1Gi
-        {{- end }}
-{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
-        - name: data
-          persistentVolumeClaim:
-{{- with .Values.persistence.existingClaim }}
-            claimName: {{ tpl . $ }}
-{{- end }}
-{{- else if not .Values.persistence.enabled }}
-        - name: data
-          emptyDir: {}
-{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
-  volumeClaimTemplates:
-    - metadata:
-        name: data
-      {{- with .Values.persistence.annotations }}
-        annotations:
-        {{- range $key, $value := . }}
-          {{ $key }}: {{ $value }}
-        {{- end }}
-      {{- end }}
-      spec:
-        accessModes:
-        {{- range .Values.persistence.accessModes }}
-          - {{ . | quote }}
-        {{- end }}
-        resources:
-          requests:
-            storage: {{ .Values.persistence.size | quote }}
-        {{ include "postgresql.storageClass" . }}
-{{- end }}
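
The master StatefulSet above sources credentials either from the generated Secret or, with usePasswordFile, from files mounted under /opt/bitnami/postgresql/secrets/, and can run the postgres-exporter sidecar on port 9187 (which requires a database name to be set). A hedged values sketch; the custom metric is a trimmed variant of the example commented out in values-production.yaml:

    postgresqlDatabase: kong           # required once metrics.enabled is true
    usePasswordFile: true              # mount passwords as files instead of env vars
    metrics:
      enabled: true
      customMetrics:                   # rendered into a ConfigMap mounted at /conf
        pg_database:
          query: "SELECT datname AS name, pg_database_size(datname) AS size_bytes FROM pg_database"
          metrics:
            - name:
                usage: "LABEL"
                description: "Name of the database"
            - size_bytes:
                usage: "GAUGE"
                description: "Size of the database in bytes"
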
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-headless.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-headless.yaml
deleted file mode 100755 (executable)
index 5c71f46..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ template "postgresql.fullname" . }}-headless
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-spec:
-  type: ClusterIP
-  clusterIP: None
-  ports:
-    - name: tcp-postgresql
-      port: {{ template "postgresql.port" . }}
-      targetPort: tcp-postgresql
-  selector:
-    app: {{ template "postgresql.name" . }}
-    release: {{ .Release.Name | quote }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-read.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc-read.yaml
deleted file mode 100755 (executable)
index d9492e2..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-{{- if .Values.replication.enabled }}
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ template "postgresql.fullname" . }}-read
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Values.service.annotations }}
-  annotations:
-{{ toYaml . | indent 4 }}
-{{- end }}
-spec:
-  type: {{ .Values.service.type }}
-  {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
-  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
-  {{- end }}
-  ports:
-    - name: tcp-postgresql
-      port:  {{ template "postgresql.port" . }}
-      targetPort: tcp-postgresql
-      {{- if .Values.service.nodePort }}
-      nodePort: {{ .Values.service.nodePort }}
-      {{- end }}
-  selector:
-    app: {{ template "postgresql.name" . }}
-    release: {{ .Release.Name | quote }}
-    role: slave
-{{- end }}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/templates/svc.yaml
deleted file mode 100755 (executable)
index 0baea4a..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ template "postgresql.fullname" . }}
-  labels:
-    app: {{ template "postgresql.name" . }}
-    chart: {{ template "postgresql.chart" . }}
-    release: {{ .Release.Name | quote }}
-    heritage: {{ .Release.Service | quote }}
-{{- with .Values.service.annotations }}
-  annotations:
-{{ tpl (toYaml .) $ | indent 4 }}
-{{- end }}
-spec:
-  type: {{ .Values.service.type }}
-  {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
-  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
-  {{- end }}
-  {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }}
-  loadBalancerSourceRanges:
-  {{ with .Values.service.loadBalancerSourceRanges }}
-{{ toYaml . | indent 4 }}
-{{- end }}
-  {{- end }}
-  {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
-  clusterIP: {{ .Values.service.clusterIP }}
-  {{- end }}
-  ports:
-    - name: tcp-postgresql
-      port: {{ template "postgresql.port" . }}
-      targetPort: tcp-postgresql
-      {{- if .Values.service.nodePort }}
-      nodePort: {{ .Values.service.nodePort }}
-      {{- end }}
-  selector:
-    app: {{ template "postgresql.name" . }}
-    release: {{ .Release.Name | quote }}
-    role: master
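
The primary Service above targets the master pod (role: master) on the configured port and honours the usual type, loadBalancerIP, source-range and nodePort options. A short values sketch; the address is illustrative:

    service:
      type: LoadBalancer               # default is ClusterIP
      port: 5432
      loadBalancerIP: 203.0.113.10     # only applied for type LoadBalancer
      loadBalancerSourceRanges:
        - 10.10.10.0/24                # example range from the chart's values comments
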
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/values-production.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/values-production.yaml
deleted file mode 100755 (executable)
index 43508f3..0000000
+++ /dev/null
@@ -1,476 +0,0 @@
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry and imagePullSecrets
-##
-global:
-  postgresql: {}
-#   imageRegistry: myRegistryName
-#   imagePullSecrets:
-#     - myRegistryKeySecretName
-#   storageClass: myStorageClass
-
-## Bitnami PostgreSQL image version
-## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
-##
-image:
-  registry: docker.io
-  repository: bitnami/postgresql
-  tag: 11.6.0-debian-9-r0
-  ## Specify a imagePullPolicy
-  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
-  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ##
-  # pullSecrets:
-  #   - myRegistryKeySecretName
-
-  ## Set to true if you would like to see extra information on logs
-  ## It turns BASH and NAMI debugging in minideb
-  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
-  debug: false
-
-## String to partially override postgresql.fullname template (will maintain the release name)
-##
-# nameOverride:
-
-## String to fully override postgresql.fullname template
-##
-# fullnameOverride:
-
-##
-## Init containers parameters:
-## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
-##
-volumePermissions:
-  enabled: true
-  image:
-    registry: docker.io
-    repository: bitnami/minideb
-    tag: stretch
-    ## Specify a imagePullPolicy
-    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
-    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
-    ##
-    pullPolicy: Always
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ##
-    # pullSecrets:
-    #   - myRegistryKeySecretName
-  ## Init container Security Context
-  securityContext:
-    runAsUser: 0
-
-## Use an alternate scheduler, e.g. "stork".
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-# schedulerName:
-
-## Pod Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-##
-securityContext:
-  enabled: true
-  fsGroup: 1001
-  runAsUser: 1001
-
-## Pod Service Account
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-serviceAccount:
-  enabled: false
-  ## Name of an already existing service account. Setting this value disables the automatic service account creation.
-  # name:
-
-replication:
-  enabled: true
-  user: repl_user
-  password: repl_password
-  slaveReplicas: 2
-  ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
-  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
-  synchronousCommit: "on"
-  ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
-  ## NOTE: It cannot be > slaveReplicas
-  numSynchronousReplicas: 1
-  ## Replication Cluster application name. Useful for defining multiple replication policies
-  applicationName: my_application
-
-## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
-# postgresqlPostgresPassword:
-
-## PostgreSQL user (has superuser privileges if username is `postgres`)
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
-postgresqlUsername: postgres
-
-## PostgreSQL password
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
-##
-# postgresqlPassword:
-
-## PostgreSQL password using existing secret
-## existingSecret: secret
-
-## Mount PostgreSQL secret as a file instead of passing environment variable
-# usePasswordFile: false
-
-## Create a database
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
-##
-# postgresqlDatabase:
-
-## PostgreSQL data dir
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-postgresqlDataDir: /bitnami/postgresql/data
-
-## An array to add extra environment variables
-## For example:
-## extraEnv:
-##   - name: FOO
-##     value: "bar"
-##
-# extraEnv:
-extraEnv: []
-
-## Specify extra initdb args
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-# postgresqlInitdbArgs:
-
-## Specify a custom location for the PostgreSQL transaction log
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-# postgresqlInitdbWalDir:
-
-## PostgreSQL configuration
-## Specify runtime configuration parameters as a dict, using camelCase, e.g.
-## {"sharedBuffers": "500MB"}
-## Alternatively, you can put your postgresql.conf under the files/ directory
-## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
-##
-# postgresqlConfiguration:
-
-## PostgreSQL extended configuration
-## As above, but _appended_ to the main configuration
-## Alternatively, you can put your *.conf under the files/conf.d/ directory
-## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
-##
-# postgresqlExtendedConf:
-
-## PostgreSQL client authentication configuration
-## Specify content for pg_hba.conf
-## Default: do not create pg_hba.conf
-## Alternatively, you can put your pg_hba.conf under the files/ directory
-# pgHbaConfiguration: |-
-#   local all all trust
-#   host all all localhost trust
-#   host mydatabase mysuser 192.168.0.0/24 md5
-
-## ConfigMap with PostgreSQL configuration
-## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
-# configurationConfigMap:
-
-## ConfigMap with PostgreSQL extended configuration
-# extendedConfConfigMap:
-
-## initdb scripts
-## Specify dictionary of scripts to be run at first boot
-## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
-##
-# initdbScripts:
-#   my_init_script.sh: |
-#      #!/bin/sh
-#      echo "Do something."
-
-## Specify the PostgreSQL username and password to execute the initdb scripts
-# initdbUser:
-# initdbPassword:
-
-## ConfigMap with scripts to be run at first boot
-## NOTE: This will override initdbScripts
-# initdbScriptsConfigMap:
-
-## Secret with scripts to be run at first boot (in case it contains sensitive information)
-## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
-# initdbScriptsSecret:
-
-## Optional duration in seconds the pod needs to terminate gracefully.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-##
-# terminationGracePeriodSeconds: 30
-
-## LDAP configuration
-##
-ldap:
-  enabled: false
-  url: ""
-  server: ""
-  port: ""
-  prefix: ""
-  suffix: ""
-  baseDN: ""
-  bindDN: ""
-  bind_password:
-  search_attr: ""
-  search_filter: ""
-  scheme: ""
-  tls: false
-
-## PostgreSQL service configuration
-service:
-  ## PosgresSQL service type
-  type: ClusterIP
-  # clusterIP: None
-  port: 5432
-
-  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  ##
-  # nodePort:
-
-  ## Provide any additional annotations which may be required.
-  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
-  annotations: {}
-  ## Set the LoadBalancer service type to internal only.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-  ##
-  # loadBalancerIP:
-
-  ## Load Balancer sources
-  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ##
-  # loadBalancerSourceRanges:
-  # - 10.10.10.0/24
-
-## Start master and slave(s) pod(s) without limitations on shm memory.
-## By default docker and containerd (and possibly other container runtimes)
-## limit `/dev/shm` to `64M` (see e.g. the
-## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
-## [containerd issue](https://github.com/containerd/containerd/issues/3654),
-## which could be not enough if PostgreSQL uses parallel workers heavily.
-## If this option is present and value is `true`,
-## to the target database pod will be mounted a new tmpfs volume to remove
-## this limitation.
-shmVolume:
-  enabled: true
-
-## PostgreSQL data Persistent Volume Storage Class
-## If defined, storageClassName: <storageClass>
-## If set to "-", storageClassName: "", which disables dynamic provisioning
-## If undefined (the default) or set to null, no storageClassName spec is
-##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-##   GKE, AWS & OpenStack)
-##
-persistence:
-  enabled: true
-  ## A manually managed Persistent Volume and Claim
-  ## If defined, PVC must be created manually before volume will be bound
-  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
-  ##
-  # existingClaim:
-
-  ## The path the volume will be mounted at, useful when using different
-  ## PostgreSQL images.
-  ##
-  mountPath: /bitnami/postgresql
-
-  ## The subdirectory of the volume to mount to, useful in dev environments
-  ## and one PV for multiple services.
-  ##
-  subPath: ""
-
-  # storageClass: "-"
-  accessModes:
-    - ReadWriteOnce
-  size: 8Gi
-  annotations: {}
-
-## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
-## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-updateStrategy:
-  type: RollingUpdate
-
-##
-## PostgreSQL Master parameters
-##
-master:
-  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
-  nodeSelector: {}
-  affinity: {}
-  tolerations: []
-  labels: {}
-  annotations: {}
-  podLabels: {}
-  podAnnotations: {}
-  priorityClassName: ""
-  ## Additional PostgreSQL Master Volume mounts
-  ##
-  extraVolumeMounts: []
-  ## Additional PostgreSQL Master Volumes
-  ##
-  extraVolumes: []
-
-##
-## PostgreSQL Slave parameters
-##
-slave:
-  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
-  nodeSelector: {}
-  affinity: {}
-  tolerations: []
-  labels: {}
-  annotations: {}
-  podLabels: {}
-  podAnnotations: {}
-  priorityClassName: ""
-  ## Additional PostgreSQL Slave Volume mounts
-  ##
-  extraVolumeMounts: []
-  ## Additional PostgreSQL Slave Volumes
-  ##
-  extraVolumes: []
-
-## Configure resource requests and limits
-## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-##
-resources:
-  requests:
-    memory: 256Mi
-    cpu: 250m
-
-networkPolicy:
-  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
-  ##
-  enabled: false
-
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## client label will have network access to the port PostgreSQL is listening
-  ## on. When true, PostgreSQL will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-
-  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
-  ## and that match other criteria, the ones that have the good label, can reach the DB.
-  ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
-  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
-  ##
-  # explicitNamespacesSelector:
-    # matchLabels:
-      # role: frontend
-    # matchExpressions:
-      # - {key: role, operator: In, values: [frontend]}
-
-## Configure extra options for liveness and readiness probes
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 30
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 5
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-
-## Configure metrics exporter
-##
-metrics:
-  enabled: true
-  # resources: {}
-  service:
-    type: ClusterIP
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/port: "9187"
-    loadBalancerIP:
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {}
-    # namespace: monitoring
-    # interval: 30s
-    # scrapeTimeout: 10s
-  ## Custom PrometheusRule to be defined
-  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
-  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
-  prometheusRule:
-    enabled: false
-    additionalLabels: {}
-    namespace: ""
-    rules: []
-      ## These are just examples rules, please adapt them to your needs.
-      ## Make sure to constraint the rules to the current postgresql service.
-      # - alert: HugeReplicationLag
-      #   expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
-      #   for: 1m
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
-      #     summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
-  image:
-    registry: docker.io
-    repository: bitnami/postgres-exporter
-    tag: 0.7.0-debian-9-r12
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ##
-    # pullSecrets:
-    #   - myRegistryKeySecretName
-  ## Define additional custom metrics
-  ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
-  # customMetrics:
-  #   pg_database:
-  #     query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
-  #     metrics:
-  #       - name:
-  #           usage: "LABEL"
-  #           description: "Name of the database"
-  #       - size_bytes:
-  #           usage: "GAUGE"
-  #           description: "Size of the database in bytes"
-  ## Pod Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ##
-  securityContext:
-    enabled: false
-    runAsUser: 1001
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
-  ## Configure extra options for liveness and readiness probes
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/values.schema.json b/helm/infrastructure/subcharts/kong/charts/postgresql/values.schema.json
deleted file mode 100755 (executable)
index ac2de6e..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-{
-  "$schema": "http://json-schema.org/schema#",
-  "type": "object",
-  "properties": {
-    "postgresqlUsername": {
-      "type": "string",
-      "title": "Admin user",
-      "form": true
-    },
-    "postgresqlPassword": {
-      "type": "string",
-      "title": "Password",
-      "form": true
-    },
-    "persistence": {
-      "type": "object",
-      "properties": {
-        "size": {
-          "type": "string",
-          "title": "Persistent Volume Size",
-          "form": true,
-          "render": "slider",
-          "sliderMin": 1,
-          "sliderMax": 100,
-          "sliderUnit": "Gi"
-        }
-      }
-    },
-    "resources": {
-      "type": "object",
-      "title": "Required Resources",
-      "description": "Configure resource requests",
-      "form": true,
-      "properties": {
-        "requests": {
-          "type": "object",
-          "properties": {
-            "memory": {
-              "type": "string",
-              "form": true,
-              "render": "slider",
-              "title": "Memory Request",
-              "sliderMin": 10,
-              "sliderMax": 2048,
-              "sliderUnit": "Mi"
-            },
-            "cpu": {
-              "type": "string",
-              "form": true,
-              "render": "slider",
-              "title": "CPU Request",
-              "sliderMin": 10,
-              "sliderMax": 2000,
-              "sliderUnit": "m"
-            }
-          }
-        }
-      }
-    },
-    "replication": {
-      "type": "object",
-      "form": true,
-      "title": "Replication Details",
-      "properties": {
-        "enabled": {
-          "type": "boolean",
-          "title": "Enable Replication",
-          "form": true
-        },
-        "slaveReplicas": {
-          "type": "integer",
-          "title": "Slave Replicas",
-          "form": true,
-          "hidden": {
-            "condition": false,
-            "value": "replication.enabled"
-          }
-        }
-      }
-    },
-    "volumePermissions": {
-      "type": "object",
-      "properties": {
-        "enabled": {
-          "type": "boolean",
-          "form": true,
-          "title": "Enable Init Containers",
-          "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup"
-        }
-      }
-    },
-    "metrics": {
-      "type": "object",
-      "properties": {
-        "enabled": {
-          "type": "boolean",
-          "title": "Configure metrics exporter",
-          "form": true
-        }
-      }
-    }
-  }
-}
diff --git a/helm/infrastructure/subcharts/kong/charts/postgresql/values.yaml b/helm/infrastructure/subcharts/kong/charts/postgresql/values.yaml
deleted file mode 100755 (executable)
index 0fc14b8..0000000
+++ /dev/null
@@ -1,484 +0,0 @@
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry and imagePullSecrets
-##
-global:
-  postgresql: {}
-#   imageRegistry: myRegistryName
-#   imagePullSecrets:
-#     - myRegistryKeySecretName
-#   storageClass: myStorageClass
-
-## Bitnami PostgreSQL image version
-## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
-##
-image:
-  registry: docker.io
-  repository: bitnami/postgresql
-  tag: 11.6.0-debian-9-r0
-  ## Specify a imagePullPolicy
-  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
-  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ##
-  # pullSecrets:
-  #   - myRegistryKeySecretName
-
-  ## Set to true if you would like to see extra information on logs
-  ## It turns BASH and NAMI debugging in minideb
-  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
-  debug: false
-
-## String to partially override postgresql.fullname template (will maintain the release name)
-##
-# nameOverride:
-
-## String to fully override postgresql.fullname template
-##
-# fullnameOverride:
-
-##
-## Init containers parameters:
-## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
-##
-volumePermissions:
-  enabled: true
-  image:
-    registry: docker.io
-    repository: bitnami/minideb
-    tag: stretch
-    ## Specify a imagePullPolicy
-    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
-    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
-    ##
-    pullPolicy: Always
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ##
-    # pullSecrets:
-    #   - myRegistryKeySecretName
-  ## Init container Security Context
-  securityContext:
-    runAsUser: 0
-
-## Use an alternate scheduler, e.g. "stork".
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-# schedulerName:
-
-## Pod Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-##
-securityContext:
-  enabled: true
-  fsGroup: 1001
-  runAsUser: 1001
-
-## Pod Service Account
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-serviceAccount:
-  enabled: false
-  ## Name of an already existing service account. Setting this value disables the automatic service account creation.
-  # name:
-
-replication:
-  enabled: false
-  user: repl_user
-  password: repl_password
-  slaveReplicas: 1
-  ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
-  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
-  synchronousCommit: "off"
-  ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
-  ## NOTE: It cannot be > slaveReplicas
-  numSynchronousReplicas: 0
-  ## Replication Cluster application name. Useful for defining multiple replication policies
-  applicationName: my_application
-
-## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
-# postgresqlPostgresPassword:
-
-## PostgreSQL user (has superuser privileges if username is `postgres`)
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
-postgresqlUsername: postgres
-
-## PostgreSQL password
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
-##
-# postgresqlPassword:
-
-## PostgreSQL password using existing secret
-## existingSecret: secret
-
-## Mount PostgreSQL secret as a file instead of passing environment variable
-# usePasswordFile: false
-
-## Create a database
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
-##
-# postgresqlDatabase:
-
-## PostgreSQL data dir
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-postgresqlDataDir: /bitnami/postgresql/data
-
-## An array to add extra environment variables
-## For example:
-## extraEnv:
-##   - name: FOO
-##     value: "bar"
-##
-# extraEnv:
-extraEnv: []
-
-## Specify extra initdb args
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-# postgresqlInitdbArgs:
-
-## Specify a custom location for the PostgreSQL transaction log
-## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
-##
-# postgresqlInitdbWalDir:
-
-## PostgreSQL configuration
-## Specify runtime configuration parameters as a dict, using camelCase, e.g.
-## {"sharedBuffers": "500MB"}
-## Alternatively, you can put your postgresql.conf under the files/ directory
-## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
-##
-# postgresqlConfiguration:
-
-## PostgreSQL extended configuration
-## As above, but _appended_ to the main configuration
-## Alternatively, you can put your *.conf under the files/conf.d/ directory
-## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
-##
-# postgresqlExtendedConf:
-
-## PostgreSQL client authentication configuration
-## Specify content for pg_hba.conf
-## Default: do not create pg_hba.conf
-## Alternatively, you can put your pg_hba.conf under the files/ directory
-# pgHbaConfiguration: |-
-#   local all all trust
-#   host all all localhost trust
-#   host mydatabase mysuser 192.168.0.0/24 md5
-
-## ConfigMap with PostgreSQL configuration
-## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
-# configurationConfigMap:
-
-## ConfigMap with PostgreSQL extended configuration
-# extendedConfConfigMap:
-
-## initdb scripts
-## Specify dictionary of scripts to be run at first boot
-## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
-##
-# initdbScripts:
-#   my_init_script.sh: |
-#      #!/bin/sh
-#      echo "Do something."
-
-## ConfigMap with scripts to be run at first boot
-## NOTE: This will override initdbScripts
-# initdbScriptsConfigMap:
-
-## Secret with scripts to be run at first boot (in case it contains sensitive information)
-## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
-# initdbScriptsSecret:
-
-## Specify the PostgreSQL username and password to execute the initdb scripts
-# initdbUser:
-# initdbPassword:
-
-## Optional duration in seconds the pod needs to terminate gracefully.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-##
-# terminationGracePeriodSeconds: 30
-
-## LDAP configuration
-##
-ldap:
-  enabled: false
-  url: ""
-  server: ""
-  port: ""
-  prefix: ""
-  suffix: ""
-  baseDN: ""
-  bindDN: ""
-  bind_password:
-  search_attr: ""
-  search_filter: ""
-  scheme: ""
-  tls: false
-
-## PostgreSQL service configuration
-service:
-  ## PosgresSQL service type
-  type: ClusterIP
-  # clusterIP: None
-  port: 5432
-
-  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  ##
-  # nodePort:
-
-  ## Provide any additional annotations which may be required.
-  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
-  annotations: {}
-  ## Set the LoadBalancer service type to internal only.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-  ##
-  # loadBalancerIP:
-
-  ## Load Balancer sources
-  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ##
-  # loadBalancerSourceRanges:
-  # - 10.10.10.0/24
-
-## Start master and slave(s) pod(s) without limitations on shm memory.
-## By default docker and containerd (and possibly other container runtimes)
-## limit `/dev/shm` to `64M` (see e.g. the
-## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
-## [containerd issue](https://github.com/containerd/containerd/issues/3654),
-## which could be not enough if PostgreSQL uses parallel workers heavily.
-## If this option is present and value is `true`,
-## to the target database pod will be mounted a new tmpfs volume to remove
-## this limitation.
-shmVolume:
-  enabled: true
-
-## PostgreSQL data Persistent Volume Storage Class
-## If defined, storageClassName: <storageClass>
-## If set to "-", storageClassName: "", which disables dynamic provisioning
-## If undefined (the default) or set to null, no storageClassName spec is
-##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-##   GKE, AWS & OpenStack)
-##
-persistence:
-  enabled: true
-  ## A manually managed Persistent Volume and Claim
-  ## If defined, PVC must be created manually before volume will be bound
-  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
-  ##
-  # existingClaim:
-
-  ## The path the volume will be mounted at, useful when using different
-  ## PostgreSQL images.
-  ##
-  mountPath: /bitnami/postgresql
-
-  ## The subdirectory of the volume to mount to, useful in dev environments
-  ## and one PV for multiple services.
-  ##
-  subPath: ""
-
-  # storageClass: "-"
-  accessModes:
-    - ReadWriteOnce
-  size: 8Gi
-  annotations: {}
-
-## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
-## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-updateStrategy:
-  type: RollingUpdate
-
-##
-## PostgreSQL Master parameters
-##
-master:
-  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
-  nodeSelector: {}
-  affinity: {}
-  tolerations: []
-  labels: {}
-  annotations: {}
-  podLabels: {}
-  podAnnotations: {}
-  priorityClassName: ""
-  extraInitContainers: |
-  # - name: do-something
-  #   image: busybox
-  #   command: ['do', 'something']
-  ## Additional PostgreSQL Master Volume mounts
-  ##
-  extraVolumeMounts: []
-  ## Additional PostgreSQL Master Volumes
-  ##
-  extraVolumes: []
-
-##
-## PostgreSQL Slave parameters
-##
-slave:
-  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
-  nodeSelector: {}
-  affinity: {}
-  tolerations: []
-  labels: {}
-  annotations: {}
-  podLabels: {}
-  podAnnotations: {}
-  priorityClassName: ""
-  extraInitContainers: |
-  # - name: do-something
-  #   image: busybox
-  #   command: ['do', 'something']
-  ## Additional PostgreSQL Slave Volume mounts
-  ##
-  extraVolumeMounts: []
-  ## Additional PostgreSQL Slave Volumes
-  ##
-  extraVolumes: []
-
-## Configure resource requests and limits
-## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-##
-resources:
-  requests:
-    memory: 256Mi
-    cpu: 250m
-
-networkPolicy:
-  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
-  ##
-  enabled: false
-
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## client label will have network access to the port PostgreSQL is listening
-  ## on. When true, PostgreSQL will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-
-  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
-  ## and that match other criteria, the ones that have the good label, can reach the DB.
-  ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
-  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
-  ##
-  # explicitNamespacesSelector:
-    # matchLabels:
-      # role: frontend
-    # matchExpressions:
-      # - {key: role, operator: In, values: [frontend]}
-
-## Configure extra options for liveness and readiness probes
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 30
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 5
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-
-## Configure metrics exporter
-##
-metrics:
-  enabled: false
-  # resources: {}
-  service:
-    type: ClusterIP
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/port: "9187"
-    loadBalancerIP:
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {}
-    # namespace: monitoring
-    # interval: 30s
-    # scrapeTimeout: 10s
-  ## Custom PrometheusRule to be defined
-  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
-  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
-  prometheusRule:
-    enabled: false
-    additionalLabels: {}
-    namespace: ""
-    rules: []
-      ## These are just examples rules, please adapt them to your needs.
-      ## Make sure to constraint the rules to the current postgresql service.
-      # - alert: HugeReplicationLag
-      #   expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
-      #   for: 1m
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
-      #     summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
-  image:
-    registry: docker.io
-    repository: bitnami/postgres-exporter
-    tag: 0.7.0-debian-9-r12
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ##
-    # pullSecrets:
-    #   - myRegistryKeySecretName
-  ## Define additional custom metrics
-  ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
-  # customMetrics:
-  #   pg_database:
-  #     query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
-  #     metrics:
-  #       - name:
-  #           usage: "LABEL"
-  #           description: "Name of the database"
-  #       - size_bytes:
-  #           usage: "GAUGE"
-  #           description: "Size of the database in bytes"
-  ## Pod Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ##
-  securityContext:
-    enabled: false
-    runAsUser: 1001
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
-  ## Configure extra options for liveness and readiness probes
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
diff --git a/helm/infrastructure/subcharts/kong/requirements.yaml b/helm/infrastructure/subcharts/kong/requirements.yaml
deleted file mode 100755 (executable)
index 98315ed..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-dependencies:
-- name: postgresql
-  version: ~8.1.0
-  repository: https://kubernetes-charts.storage.googleapis.com/
-  condition: postgresql.enabled
old mode 100755 (executable)
new mode 100644 (file)
index 6d7484e..3d64d8c
@@ -88,15 +88,15 @@ proxy:
   # HTTP plain-text traffic
   http:
     enabled: true
-    servicePort: 80
-    containerPort: 8000
+    servicePort: 32080
+    containerPort: 32080
     # Set a nodePort which is available if service type is NodePort
     nodePort: 32080
 
   tls:
     enabled: true
-    servicePort: 443
-    containerPort: 8443
+    servicePort: 32443
+    containerPort: 32443
     # Set a target port for the TLS port in proxy service, useful when using TLS
     # termination on an ELB.
     # overrideServiceTargetPort: 8000
diff --git a/helm/infrastructure/subcharts/prometheus/.helmignore b/helm/infrastructure/subcharts/prometheus/.helmignore
new file mode 100644 (file)
index 0000000..825c007
--- /dev/null
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+OWNERS
diff --git a/helm/infrastructure/subcharts/prometheus/Chart.yaml b/helm/infrastructure/subcharts/prometheus/Chart.yaml
new file mode 100644 (file)
index 0000000..ed1faaa
--- /dev/null
@@ -0,0 +1,20 @@
+apiVersion: v1
+appVersion: 2.18.1
+description: Prometheus is a monitoring system and time series database.
+engine: gotpl
+home: https://prometheus.io/
+icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+maintainers:
+- email: gianrubio@gmail.com
+  name: gianrubio
+- email: zanhsieh@gmail.com
+  name: zanhsieh
+name: prometheus
+sources:
+- https://github.com/prometheus/alertmanager
+- https://github.com/prometheus/prometheus
+- https://github.com/prometheus/pushgateway
+- https://github.com/prometheus/node_exporter
+- https://github.com/kubernetes/kube-state-metrics
+tillerVersion: '>=2.8.0'
+version: 11.3.0
diff --git a/helm/infrastructure/subcharts/prometheus/README.md b/helm/infrastructure/subcharts/prometheus/README.md
new file mode 100644 (file)
index 0000000..a55f34c
--- /dev/null
@@ -0,0 +1,478 @@
+# Prometheus
+
+[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true.
+
+## TL;DR;
+
+```console
+$ helm install stable/prometheus
+```
+
+## Introduction
+
+This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.3+ with Beta APIs enabled
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/prometheus
+```
+
+The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Prometheus 2.x
+
+Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/).
+
+Users of this chart will need to update their alerting rules to the new format before they can upgrade.
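+
+For example, alerting rules written in the old 1.x expression-language format must be converted to the 2.x YAML group format. A minimal sketch (the group name, alert name and expression are illustrative only):
+
+```yaml
+# illustrative rule file in the Prometheus 2.x format
+groups:
+  - name: example
+    rules:
+      - alert: InstanceDown
+        expr: up == 0
+        for: 5m
+        labels:
+          severity: critical
+```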
+
+## Upgrading from previous chart versions.
+
+Version 9.0 adds a new option to enable or disable the Prometheus Server.
+This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment.
+To install the server `server.enabled` must be set to `true`.
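+
+For instance, a scrape-only deployment in the cluster that hosts the exporters might disable the server and the other optional components (a sketch; the release name is a placeholder and the Helm 2 syntax used elsewhere in this README is assumed):
+
+```console
+# exporters-only install; "my-exporters" is a placeholder release name
+$ helm install --name my-exporters stable/prometheus \
+    --set server.enabled=false \
+    --set alertmanager.enabled=false \
+    --set pushgateway.enabled=false
+```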
+
+As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data.
+
+### Example migration
+
+Assume you have an existing release of the prometheus chart named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data, do the following:
+
+1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below:
+
+       ```
+       alertmanager:
+         enabled: false
+       alertmanagerFiles:
+         alertmanager.yml: ""
+       kubeStateMetrics:
+         enabled: false
+       nodeExporter:
+         enabled: false
+       pushgateway:
+         enabled: false
+       server:
+         extraArgs:
+           storage.local.retention: 720h
+       serverFiles:
+         alerts: ""
+         prometheus.yml: ""
+         rules: ""
+       ```
+
+1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
+
+   ```
+         prometheus.yml:
+           ...
+           remote_read:
+           - url: http://prometheus-old/api/v1/read
+           ...
+   ```
+
+   Old data will be available when you query the new prometheus instance.
+
+## Scraping Pod Metrics via Annotations
+
+This chart uses a default configuration that causes prometheus
+to scrape a variety of kubernetes resource types, provided they have the correct annotations.
+In this section we describe how to configure pods to be scraped;
+for information on how other resource types can be scraped, you can
+run `helm template` to get the kubernetes resource definitions,
+and then compare the prometheus configuration in the ConfigMap against the prometheus documentation
+for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
+and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
+
+In order to get prometheus to scrape pods, you must add annotations to the pods as below:
+
+```
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /metrics
+    prometheus.io/port: "8080"
+spec:
+...
+```
+
+You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from,
+and set `prometheus.io/port` to the port on which your pod serves metrics.
+Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be
+enclosed in double quotes.
+
+## Configuration
+
+The following table lists the configurable parameters of the Prometheus chart and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`alertmanager.enabled` | If true, create alertmanager | `true`
+`alertmanager.name` | alertmanager container name | `alertmanager`
+`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager`
+`alertmanager.image.tag` | alertmanager container image tag | `v0.20.0`
+`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent`
+`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | ``
+`alertmanager.baseURL` | The external url at which the server can be accessed | `"http://localhost:9093"`
+`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}`
+`alertmanager.extraSecretMounts` | Additional alertmanager Secret mounts | `[]`
+`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""`
+`alertmanager.configFromSecret` | The name of a secret in the same kubernetes namespace which contains the Alertmanager config, setting this value will prevent the default alertmanager ConfigMap from being generated | `""`
+`alertmanager.configFileName` | The configuration file name to be loaded to alertmanager. Must match the key within configuration loaded from ConfigMap/Secret. | `alertmanager.yml`
+`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false`
+`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}`
+`alertmanager.ingress.extraLabels` | alertmanager Ingress additional labels | `{}`
+`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]`
+`alertmanager.ingress.extraPaths` | Ingress extra paths to prepend to every alertmanager host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]`
+`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]`
+`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}`
+`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
+`alertmanager.affinity` | pod affinity | `{}`
+`alertmanager.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false`
+`alertmanager.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1`
+`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil`
+`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true`
+`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]`
+`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}`
+`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""`
+`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data`
+`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi`
+`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset`
+`alertmanager.persistentVolume.volumeBindingMode` | alertmanager data Persistent Volume Binding Mode | `unset`
+`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""`
+`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}`
+`alertmanager.podLabels` | labels to be added to Prometheus AlertManager pods | `{}`
+`alertmanager.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` |
+`alertmanager.replicaCount` | desired number of alertmanager pods | `1`
+`alertmanager.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false`
+`alertmanager.statefulSet.podManagementPolicy` | podManagementPolicy of alertmanager pods | `OrderedReady`
+`alertmanager.statefulSet.headless.annotations` | annotations for alertmanager headless service | `{}`
+`alertmanager.statefulSet.headless.labels` | labels for alertmanager headless service | `{}`
+`alertmanager.statefulSet.headless.enableMeshPeer` | If true, enable the mesh peer endpoint for the headless service | `false`
+`alertmanager.statefulSet.headless.servicePort` | alertmanager headless service port | `80`
+`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil`
+`alertmanager.resources` | alertmanager pod resource requests & limits | `{}`
+`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}`
+`alertmanager.service.annotations` | annotations for alertmanager service | `{}`
+`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""`
+`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]`
+`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
+`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
+`alertmanager.service.servicePort` | alertmanager service port | `80`
+`alertmanager.service.sessionAffinity` | Session Affinity for alertmanager service, can be `None` or `ClientIP` | `None`
+`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP`
+`alertmanager.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }`
+`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration
+`configmapReload.prometheus.enabled` | If false, the configmap-reload container for Prometheus will not be deployed | `true`
+`configmapReload.prometheus.name` | configmap-reload container name | `configmap-reload`
+`configmapReload.prometheus.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload`
+`configmapReload.prometheus.image.tag` | configmap-reload container image tag | `v0.3.0`
+`configmapReload.prometheus.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent`
+`configmapReload.prometheus.extraArgs` | Additional configmap-reload container arguments | `{}`
+`configmapReload.prometheus.extraVolumeDirs` | Additional configmap-reload volume directories | `{}`
+`configmapReload.prometheus.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]`
+`configmapReload.prometheus.resources` | configmap-reload pod resource requests & limits | `{}`
+`configmapReload.alertmanager.enabled` | If false, the configmap-reload container for AlertManager will not be deployed | `true`
+`configmapReload.alertmanager.name` | configmap-reload container name | `configmap-reload`
+`configmapReload.alertmanager.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload`
+`configmapReload.alertmanager.image.tag` | configmap-reload container image tag | `v0.3.0`
+`configmapReload.alertmanager.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent`
+`configmapReload.alertmanager.extraArgs` | Additional configmap-reload container arguments | `{}`
+`configmapReload.alertmanager.extraVolumeDirs` | Additional configmap-reload volume directories | `{}`
+`configmapReload.alertmanager.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]`
+`configmapReload.alertmanager.resources` | configmap-reload pod resource requests & limits | `{}`
+`initChownData.enabled` | If false, don't reset data ownership at startup | `true`
+`initChownData.name` | init-chown-data container name | `init-chown-data`
+`initChownData.image.repository` | init-chown-data container image repository | `busybox`
+`initChownData.image.tag` | init-chown-data container image tag | `latest`
+`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent`
+`initChownData.resources` | init-chown-data pod resource requests & limits | `{}`
+`kubeStateMetrics.enabled` | If true, create kube-state-metrics sub-chart, see the [kube-state-metrics chart for configuration options](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) | `true`
+`kube-state-metrics` | [kube-state-metrics configuration options](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) | `Same as sub-chart's`
+`nodeExporter.enabled` | If true, create node-exporter | `true`
+`nodeExporter.name` | node-exporter container name | `node-exporter`
+`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter`
+`nodeExporter.image.tag` | node-exporter container image tag | `v0.18.1`
+`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent`
+`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}`
+`nodeExporter.extraInitContainers` | Init containers to launch alongside the node-exporter | `[]`
+`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]`
+`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]`
+`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true`
+`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true`
+`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}`
+`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}`
+`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}`
+`nodeExporter.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false`
+`nodeExporter.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1`
+`nodeExporter.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` |
+`nodeExporter.podSecurityPolicy.enabled` | Specify if a Pod Security Policy for node-exporter must be created | `false`
+`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
+`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil`
+`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}`
+`nodeExporter.securityContext` | securityContext for containers in pod | `{}`
+`nodeExporter.service.annotations` | annotations for node-exporter service | `{prometheus.io/scrape: "true"}`
+`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None`
+`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]`
+`nodeExporter.service.hostPort` | node-exporter service host port | `9100`
+`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
+`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
+`nodeExporter.service.servicePort` | node-exporter service port | `9100`
+`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP`
+`podSecurityPolicy.enabled` | If true, create & use pod security policies resources | `false`
+`pushgateway.enabled` | If true, create pushgateway | `true`
+`pushgateway.name` | pushgateway container name | `pushgateway`
+`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway`
+`pushgateway.image.tag` | pushgateway container image tag | `v1.0.1`
+`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent`
+`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}`
+`pushgateway.extraInitContainers` | Init containers to launch alongside the pushgateway | `[]`
+`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false`
+`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}`
+`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]`
+`pushgateway.ingress.extraPaths` | Ingress extra paths to prepend to every pushgateway host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]`
+`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]`
+`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}`
+`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}`
+`pushgateway.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` |
+`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
+`pushgateway.replicaCount` | desired number of pushgateway pods | `1`
+`pushgateway.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false`
+`pushgateway.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1`
+`pushgateway.schedulerName` | pushgateway alternate scheduler name | `nil`
+`pushgateway.persistentVolume.enabled` | If true, Prometheus pushgateway will create a Persistent Volume Claim | `false`
+`pushgateway.persistentVolume.accessModes` | Prometheus pushgateway data Persistent Volume access modes | `[ReadWriteOnce]`
+`pushgateway.persistentVolume.annotations` | Prometheus pushgateway data Persistent Volume annotations | `{}`
+`pushgateway.persistentVolume.existingClaim` | Prometheus pushgateway data Persistent Volume existing claim name | `""`
+`pushgateway.persistentVolume.mountPath` | Prometheus pushgateway data Persistent Volume mount root path | `/data`
+`pushgateway.persistentVolume.size` | Prometheus pushgateway data Persistent Volume size | `2Gi`
+`pushgateway.persistentVolume.storageClass` | Prometheus pushgateway data Persistent Volume Storage Class |  `unset`
+`pushgateway.persistentVolume.volumeBindingMode` | Prometheus pushgateway data Persistent Volume Binding Mode |  `unset`
+`pushgateway.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""`
+`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil`
+`pushgateway.resources` | pushgateway pod resource requests & limits | `{}`
+`pushgateway.service.annotations` | annotations for pushgateway service | `{}`
+`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""`
+`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]`
+`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
+`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
+`pushgateway.service.servicePort` | pushgateway service port | `9091`
+`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP`
+`pushgateway.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }`
+`rbac.create` | If true, create & use RBAC resources | `true`
+`server.enabled` | If false, Prometheus server will not be created | `true`
+`server.name` | Prometheus server container name | `server`
+`server.image.repository` | Prometheus server container image repository | `prom/prometheus`
+`server.image.tag` | Prometheus server container image tag | `v2.18.1`
+`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent`
+`server.configPath` |  Path to a prometheus server config file on the container FS  | `/etc/config/prometheus.yml`
+`server.global.scrape_interval` | How frequently to scrape targets by default | `1m`
+`server.global.scrape_timeout` | How long until a scrape request times out | `10s`
+`server.global.evaluation_interval` | How frequently to evaluate rules | `1m`
+`server.remoteWrite` | The remote write feature of Prometheus allows transparently sending samples. | `[]`
+`server.remoteRead` | The remote read feature of Prometheus allows transparently receiving samples. | `[]`
+`server.extraArgs` | Additional Prometheus server container arguments | `{}`
+`server.extraFlags` | Additional Prometheus server container flags | `["web.enable-lifecycle"]`
+`server.extraInitContainers` | Init containers to launch alongside the server | `[]`
+`server.prefixURL` | The prefix slug at which the server can be accessed | ``
+`server.baseURL` | The external url at which the server can be accessed | ``
+`server.env` | Prometheus server environment variables | `[]`
+`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]`
+`server.extraConfigmapMounts` | Additional Prometheus server configMap mounts | `[]`
+`server.extraSecretMounts` | Additional Prometheus server Secret mounts | `[]`
+`server.extraVolumeMounts` | Additional Prometheus server Volume mounts | `[]`
+`server.extraVolumes` | Additional Prometheus server Volumes | `[]`
+`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""`
+`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false`
+`server.ingress.annotations` | Prometheus server Ingress annotations | `[]`
+`server.ingress.extraLabels` | Prometheus server Ingress additional labels | `{}`
+`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]`
+`server.ingress.extraPaths` | Ingress extra paths to prepend to every Prometheus server host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]`
+`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]`
+`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}`
+`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]`
+`server.affinity` | pod affinity | `{}`
+`server.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false`
+`server.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1`
+`server.priorityClassName` | Prometheus server priorityClassName | `nil`
+`server.schedulerName` | Prometheus server alternate scheduler name | `nil`
+`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true`
+`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]`
+`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}`
+`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""`
+`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data`
+`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi`
+`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class |  `unset`
+`server.persistentVolume.volumeBindingMode` | Prometheus server data Persistent Volume Binding Mode | `unset`
+`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""`
+`server.emptyDir.sizeLimit` | emptyDir sizeLimit if a Persistent Volume is not used | `""`
+`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}`
+`server.podLabels` | labels to be added to Prometheus server pods | `{}`
+`server.alertmanagers` | Prometheus AlertManager configuration for the Prometheus server | `{}`
+`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}`
+`server.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` |
+`server.replicaCount` | desired number of Prometheus server pods | `1`
+`server.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false`
+`server.statefulSet.annotations` | annotations to be added to Prometheus server stateful set | `{}`
+`server.statefulSet.labels` | labels to be added to Prometheus server stateful set | `{}`
+`server.statefulSet.podManagementPolicy` | podManagementPolicy of server pods | `OrderedReady`
+`server.statefulSet.headless.annotations` | annotations for Prometheus server headless service | `{}`
+`server.statefulSet.headless.labels` | labels for Prometheus server headless service | `{}`
+`server.statefulSet.headless.servicePort` | Prometheus server headless service port | `80`
+`server.resources` | Prometheus server resource requests and limits | `{}`
+`server.verticalAutoscaler.enabled` | If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on the above configs) | `false`
+`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}`
+`server.service.annotations` | annotations for Prometheus server service | `{}`
+`server.service.clusterIP` | internal Prometheus server cluster service IP | `""`
+`server.service.externalIPs` | Prometheus server service external IP addresses | `[]`
+`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""`
+`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]`
+`server.service.nodePort` | Port to be used as the service NodePort (ignored if `server.service.type` is not `NodePort`) | `0`
+`server.service.servicePort` | Prometheus server service port | `80`
+`server.service.sessionAffinity` | Session Affinity for server service, can be `None` or `ClientIP` | `None`
+`server.service.type` | type of Prometheus server service to create | `ClusterIP`
+`server.service.gRPC.enabled` | If true, open a second port on the service for gRPC | `false`
+`server.service.gRPC.servicePort` | Prometheus service gRPC port, (ignored if `server.service.gRPC.enabled` is not `true`) | `10901`
+`server.service.gRPC.nodePort` | Port to be used as gRPC nodePort in the prometheus service | `0`
+`server.service.statefulsetReplica.enabled` | If true, send the traffic from the service to only one replica of the replicaset | `false`
+`server.service.statefulsetReplica.replica` | Which replica to send the traffic to | `0`
+`server.hostAliases` | /etc/hosts entries in container(s) | `[]`
+`server.sidecarContainers` | array of snippets with your sidecar containers for prometheus server | `""`
+`server.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }`
+`serviceAccounts.alertmanager.create` | If true, create the alertmanager service account | `true`
+`serviceAccounts.alertmanager.name` | name of the alertmanager service account to use or create | `{{ prometheus.alertmanager.fullname }}`
+`serviceAccounts.alertmanager.annotations` | annotations for the alertmanager service account | `{}`
+`serviceAccounts.nodeExporter.create` | If true, create the nodeExporter service account | `true`
+`serviceAccounts.nodeExporter.name` | name of the nodeExporter service account to use or create | `{{ prometheus.nodeExporter.fullname }}`
+`serviceAccounts.nodeExporter.annotations` | annotations for the nodeExporter service account | `{}`
+`serviceAccounts.pushgateway.create` | If true, create the pushgateway service account | `true`
+`serviceAccounts.pushgateway.name` | name of the pushgateway service account to use or create | `{{ prometheus.pushgateway.fullname }}`
+`serviceAccounts.pushgateway.annotations` | annotations for the pushgateway service account | `{}`
+`serviceAccounts.server.create` | If true, create the server service account | `true`
+`serviceAccounts.server.name` | name of the server service account to use or create | `{{ prometheus.server.fullname }}`
+`serviceAccounts.server.annotations` | annotations for the server service account | `{}`
+`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300`
+`server.retention` | (optional) Prometheus data retention | `"15d"`
+`serverFiles.alerts` | (Deprecated) Prometheus server alerts configuration | `{}`
+`serverFiles.rules` | (Deprecated) Prometheus server rules configuration | `{}`
+`serverFiles.alerting_rules.yml` | Prometheus server alerts configuration | `{}`
+`serverFiles.recording_rules.yml` | Prometheus server rules configuration | `{}`
+`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration
+`extraScrapeConfigs` | Prometheus server additional scrape configuration | `""`
+`alertRelabelConfigs` | Prometheus server [alert relabeling configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs) for H/A prometheus | `""`
+`networkPolicy.enabled` | Enable NetworkPolicy | `false`
+`forceNamespace` | Force resources to be namespaced | `null` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install stable/prometheus --name my-release \
+    --set server.terminationGracePeriodSeconds=360
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install stable/prometheus --name my-release -f values.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+Note that you can provide multiple YAML files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
+
+```yaml
+# values.yaml
+# ...
+
+# service1-alert.yaml
+serverFiles:
+  alerts:
+    service1:
+      - alert: anAlert
+      # ...
+
+# service2-alert.yaml
+serverFiles:
+  alerts:
+    service2:
+      - alert: anAlert
+      # ...
+```
+
+```console
+$ helm install stable/prometheus --name my-release -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
+```
+
+### RBAC Configuration
+Role and RoleBinding resources will be created automatically for the `server` service.
+
+To manually set up RBAC, set the parameter `rbac.create=false` and specify the service account to be used for each service by setting `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account, as shown in the sketch below.
+
+> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
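+
+A minimal values sketch, showing only the `server` component (repeat the same keys for `alertmanager`, `nodeExporter` and `pushgateway` as needed; the account name is a placeholder for a pre-existing service account):
+
+```yaml
+rbac:
+  create: false
+serviceAccounts:
+  server:
+    create: false
+    # placeholder name for a ServiceAccount you created beforehand
+    name: prometheus-server-custom
+```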
+
+### ConfigMap Files
+AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod.
+
+Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod.
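+
+A minimal values sketch of both keys (the receiver name and scrape target are illustrative placeholders only):
+
+```yaml
+alertmanagerFiles:
+  alertmanager.yml:
+    route:
+      # illustrative receiver name
+      receiver: default-receiver
+    receivers:
+      - name: default-receiver
+serverFiles:
+  prometheus.yml:
+    scrape_configs:
+      # illustrative scrape job
+      - job_name: prometheus
+        static_configs:
+          - targets:
+              - localhost:9090
+```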
+
+### Ingress TLS
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
+
+```console
+kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key
+```
+
+Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file:
+
+```yaml
+server:
+  ingress:
+    ## If true, Prometheus server Ingress will be created
+    ##
+    enabled: true
+
+    ## Prometheus server Ingress hostnames
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts:
+      - prometheus.domain.com
+
+    ## Prometheus server Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls:
+      - secretName: prometheus-server-tls
+        hosts:
+          - prometheus.domain.com
+```
+
+### NetworkPolicy
+
+Enabling Network Policy for Prometheus will secure connections to Alert Manager
+and Kube State Metrics by only accepting connections from Prometheus Server.
+All inbound connections to Prometheus Server are still allowed.
+
+To enable network policy for Prometheus, install a networking plugin that
+implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true.
+
+If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need
+to manually create a NetworkPolicy that allows Prometheus Server to reach them.
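+
+Assuming the cluster already runs a CNI plugin that enforces NetworkPolicy (for example Calico), the chart's policies can be switched on for an existing release (a sketch using the Helm 2 syntax used elsewhere in this README):
+
+```console
+# "my-release" is a placeholder release name
+$ helm upgrade my-release stable/prometheus --set networkPolicy.enabled=true
+```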
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/.helmignore b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/.helmignore
new file mode 100644 (file)
index 0000000..f0c1319
--- /dev/null
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/Chart.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/Chart.yaml
new file mode 100644 (file)
index 0000000..7752ccb
--- /dev/null
@@ -0,0 +1,20 @@
+apiVersion: v1
+appVersion: 1.9.5
+description: Install kube-state-metrics to generate and expose cluster-level metrics
+home: https://github.com/kubernetes/kube-state-metrics/
+keywords:
+- metric
+- monitoring
+- prometheus
+- kubernetes
+maintainers:
+- email: jose@armesto.net
+  name: fiunchinho
+- email: tariq.ibrahim@mulesoft.com
+  name: tariq1890
+- email: manuel@rueg.eu
+  name: mrueg
+name: kube-state-metrics
+sources:
+- https://github.com/kubernetes/kube-state-metrics/
+version: 2.7.2
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/OWNERS b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/OWNERS
new file mode 100644 (file)
index 0000000..6ffd97d
--- /dev/null
@@ -0,0 +1,8 @@
+approvers:
+- fiunchinho
+- tariq1890
+- mrueg
+reviewers:
+- fiunchinho
+- tariq1890
+- mrueg
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/README.md b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/README.md
new file mode 100644 (file)
index 0000000..5c64569
--- /dev/null
@@ -0,0 +1,73 @@
+# kube-state-metrics Helm Chart
+
+* Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/kube-state-metrics
+```
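+
+This assumes the `stable` Helm repository is already configured on your client; if it is not, it can be added first (the repository URL is the same one referenced by the parent Prometheus chart's `requirements.yaml`):
+
+```bash
+$ helm repo add stable https://kubernetes-charts.storage.googleapis.com/
+$ helm repo update
+```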
+
+## Configuration
+
+| Parameter                                    | Description                                                                           | Default                                    |
+|:---------------------------------------------|:--------------------------------------------------------------------------------------|:-------------------------------------------|
+| `image.repository`                           | The image repository to pull from                                                     | `quay.io/coreos/kube-state-metrics`        |
+| `image.tag`                                  | The image tag to pull from                                                            | `v1.9.5`                                   |
+| `image.pullPolicy`                           | Image pull policy                                                                     | `IfNotPresent`                             |
+| `replicas`                                   | Number of replicas                                                                    | `1`                                        |
+| `autosharding.enabled`                       | Set to `true` to automatically shard data across `replicas` pods. EXPERIMENTAL        | `false`                                    |
+| `service.port`                               | The port of the container                                                             | `8080`                                     |
+| `service.annotations`                        | Annotations to be added to the service                                                | `{}`                                       |
+| `customLabels`                               | Custom labels to apply to service, deployment and pods                                | `{}`                                       |
+| `hostNetwork`                                | Whether or not to use the host network                                                | `false`                                    |
+| `prometheusScrape`                           | Whether or not to add the `prometheus.io/scrape` annotation to the service            | `true`                                     |
+| `rbac.create`                                | If true, create & use RBAC resources                                                  | `true`                                     |
+| `serviceAccount.create`                      | If true, create & use serviceAccount                                                  | `true`                                     |
+| `serviceAccount.name`                        | If not set & create is true, use template fullname                                    |                                            |
+| `serviceAccount.imagePullSecrets`            | Specify image pull secrets field                                                      | `[]`                                       |
+| `podSecurityPolicy.enabled`                  | If true, create & use PodSecurityPolicy resources                                     | `false`                                    |
+| `podSecurityPolicy.annotations`              | Specify pod annotations in the pod security policy                                    | `{}`                                       |
+| `securityContext.enabled`                    | Enable security context                                                               | `true`                                     |
+| `securityContext.fsGroup`                    | Group ID for the container                                                            | `65534`                                    |
+| `securityContext.runAsUser`                  | User ID for the container                                                             | `65534`                                    |
+| `priorityClassName`                          | Name of Priority Class to assign pods                                                 | `nil`                                      |
+| `nodeSelector`                               | Node labels for pod assignment                                                        | `{}`                                       |
+| `affinity`                                   | Affinity settings for pod assignment                                                  | `{}`                                       |
+| `tolerations`                                | Tolerations for pod assignment                                                        | `[]`                                       |
+| `podAnnotations`                             | Annotations to be added to the pod                                                    | `{}`                                       |
+| `resources`                                  | kube-state-metrics resource requests and limits                                       | `{}`                                       |
+| `collectors.certificatesigningrequests`      | Enable the certificatesigningrequests collector.                                      | `true`                                     |
+| `collectors.configmaps`                      | Enable the configmaps collector.                                                      | `true`                                     |
+| `collectors.cronjobs`                        | Enable the cronjobs collector.                                                        | `true`                                     |
+| `collectors.daemonsets`                      | Enable the daemonsets collector.                                                      | `true`                                     |
+| `collectors.deployments`                     | Enable the deployments collector.                                                     | `true`                                     |
+| `collectors.endpoints`                       | Enable the endpoints collector.                                                       | `true`                                     |
+| `collectors.horizontalpodautoscalers`        | Enable the horizontalpodautoscalers collector.                                        | `true`                                     |
+| `collectors.ingresses`                       | Enable the ingresses collector.                                                       | `true`                                     |
+| `collectors.jobs`                            | Enable the jobs collector.                                                            | `true`                                     |
+| `collectors.limitranges`                     | Enable the limitranges collector.                                                     | `true`                                     |
+| `collectors.mutatingwebhookconfigurations`   | Enable the mutatingwebhookconfigurations collector.                                   | `false`                                    | 
+| `collectors.namespaces`                      | Enable the namespaces collector.                                                      | `true`                                     |
+| `collectors.nodes`                           | Enable the nodes collector.                                                           | `true`                                     |
+| `collectors.persistentvolumeclaims`          | Enable the persistentvolumeclaims collector.                                          | `true`                                     |
+| `collectors.persistentvolumes`               | Enable the persistentvolumes collector.                                               | `true`                                     |
+| `collectors.poddisruptionbudgets`            | Enable the poddisruptionbudgets collector.                                            | `true`                                     |
+| `collectors.pods`                            | Enable the pods collector.                                                            | `true`                                     |
+| `collectors.replicasets`                     | Enable the replicasets collector.                                                     | `true`                                     |
+| `collectors.replicationcontrollers`          | Enable the replicationcontrollers collector.                                          | `true`                                     |
+| `collectors.resourcequotas`                  | Enable the resourcequotas collector.                                                  | `true`                                     |
+| `collectors.secrets`                         | Enable the secrets collector.                                                         | `true`                                     |
+| `collectors.services`                        | Enable the services collector.                                                        | `true`                                     |
+| `collectors.statefulsets`                    | Enable the statefulsets collector.                                                    | `true`                                     |
+| `collectors.storageclasses`                  | Enable the storageclasses collector.                                                  | `true`                                     |
+| `collectors.validatingwebhookconfigurations` | Enable the validatingwebhookconfigurations collector.                                 | `false`                                    |
+| `collectors.verticalpodautoscalers`          | Enable the verticalpodautoscalers collector.                                          | `false`                                    |
+| `collectors.volumeattachments`               | Enable the volumeattachments collector.                                               | `false`                                    |
+| `prometheus.monitor.enabled`                 | Set this to `true` to create ServiceMonitor for Prometheus operator                   | `false`                                    |
+| `prometheus.monitor.additionalLabels`        | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}`                                       |
+| `prometheus.monitor.namespace`               | Namespace where servicemonitor resource should be created                             | `the same namespace as kube-state-metrics` |
+| `prometheus.monitor.honorLabels`             | Honor metric labels                                                                   | `false`                                    |
+| `namespaceOverride`                          | Override the deployment namespace                                                     | `""` (`Release.Namespace`)                 |
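+
+Any of these parameters can be overridden at install time. As a sketch, disabling the `secrets` collector and attaching a custom label (the `team=monitoring` label is only a placeholder; Helm 2 `--name` syntax as above, with Helm 3 the release name is passed positionally):
+
+```bash
+$ helm install --name my-release stable/kube-state-metrics \
+    --set collectors.secrets=false \
+    --set customLabels.team=monitoring
+```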
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/NOTES.txt
new file mode 100644 (file)
index 0000000..5a646e0
--- /dev/null
@@ -0,0 +1,10 @@
+kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
+The exposed metrics can be found here:
+https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
+
+The metrics are exported on the HTTP endpoint /metrics on the listening port.
+In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
+
+They are served either as plaintext or protobuf depending on the Accept header.
+They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.
+
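+To verify the endpoint from outside the cluster, one option is to port-forward the service and fetch the metrics (this assumes kubectl access to the release namespace):
+
+  kubectl port-forward svc/{{ template "kube-state-metrics.fullname" . }} {{ .Values.service.port }} --namespace {{ template "kube-state-metrics.namespace" . }}
+  curl http://localhost:{{ .Values.service.port }}/metrics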
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl
new file mode 100644 (file)
index 0000000..6ae0e64
--- /dev/null
@@ -0,0 +1,47 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kube-state-metrics.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kube-state-metrics.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kube-state-metrics.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts
+*/}}
+{{- define "kube-state-metrics.namespace" -}}
+  {{- if .Values.namespaceOverride -}}
+    {{- .Values.namespaceOverride -}}
+  {{- else -}}
+    {{- .Release.Namespace -}}
+  {{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml
new file mode 100644 (file)
index 0000000..319aec1
--- /dev/null
@@ -0,0 +1,180 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: {{ template "kube-state-metrics.fullname" . }}
+rules:
+{{ if .Values.collectors.certificatesigningrequests }}
+- apiGroups: ["certificates.k8s.io"]
+  resources:
+  - certificatesigningrequests
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.configmaps }}
+- apiGroups: [""]
+  resources:
+  - configmaps
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.cronjobs }}
+- apiGroups: ["batch"]
+  resources:
+  - cronjobs
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.daemonsets }}
+- apiGroups: ["extensions", "apps"]
+  resources:
+  - daemonsets
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.deployments }}
+- apiGroups: ["extensions", "apps"]
+  resources:
+  - deployments
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.endpoints }}
+- apiGroups: [""]
+  resources:
+  - endpoints
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.horizontalpodautoscalers }}
+- apiGroups: ["autoscaling"]
+  resources:
+  - horizontalpodautoscalers
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.ingresses }}
+- apiGroups: ["extensions", "networking.k8s.io"]
+  resources:
+  - ingresses
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.jobs }}
+- apiGroups: ["batch"]
+  resources:
+  - jobs
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.limitranges }}
+- apiGroups: [""]
+  resources:
+  - limitranges
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.mutatingwebhookconfigurations }}
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources:
+    - mutatingwebhookconfigurations
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.namespaces }}
+- apiGroups: [""]
+  resources:
+  - namespaces
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.networkpolicies }}
+- apiGroups: ["networking.k8s.io"]
+  resources:
+  - networkpolicies
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.nodes }}
+- apiGroups: [""]
+  resources:
+  - nodes
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.persistentvolumeclaims }}
+- apiGroups: [""]
+  resources:
+  - persistentvolumeclaims
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.persistentvolumes }}
+- apiGroups: [""]
+  resources:
+  - persistentvolumes
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.poddisruptionbudgets }}
+- apiGroups: ["policy"]
+  resources:
+    - poddisruptionbudgets
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.pods }}
+- apiGroups: [""]
+  resources:
+  - pods
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.replicasets }}
+- apiGroups: ["extensions", "apps"]
+  resources:
+  - replicasets
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.replicationcontrollers }}
+- apiGroups: [""]
+  resources:
+  - replicationcontrollers
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.resourcequotas }}
+- apiGroups: [""]
+  resources:
+  - resourcequotas
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.secrets }}
+- apiGroups: [""]
+  resources:
+  - secrets
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.services }}
+- apiGroups: [""]
+  resources:
+  - services
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.statefulsets }}
+- apiGroups: ["apps"]
+  resources:
+  - statefulsets
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.storageclasses }}
+- apiGroups: ["storage.k8s.io"]
+  resources:
+    - storageclasses
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.validatingwebhookconfigurations }}
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources:
+    - validatingwebhookconfigurations
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.volumeattachments }}
+- apiGroups: ["storage.k8s.io"]
+  resources:
+    - volumeattachments
+  verbs: ["list", "watch"]
+{{ end -}}
+{{ if .Values.collectors.verticalpodautoscalers }}
+- apiGroups: ["autoscaling.k8s.io"]
+  resources:
+    - verticalpodautoscalers
+  verbs: ["list", "watch"]
+{{ end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml
new file mode 100644 (file)
index 0000000..4635985
--- /dev/null
@@ -0,0 +1,19 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: {{ template "kube-state-metrics.fullname" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "kube-state-metrics.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..b6affcc
--- /dev/null
@@ -0,0 +1,186 @@
+apiVersion: apps/v1
+{{- if .Values.autosharding.enabled }}
+kind: StatefulSet
+{{- else }}
+kind: Deployment
+{{- end }}
+metadata:
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app.kubernetes.io/instance: "{{ .Release.Name }}"
+    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+  replicas: {{ .Values.replicas }}
+{{- if .Values.autosharding.enabled }}
+  serviceName: {{ template "kube-state-metrics.fullname" . }}
+  volumeClaimTemplates: []
+{{- end }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+        app.kubernetes.io/instance: "{{ .Release.Name }}"
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 8 }}
+{{- end }}
+{{- if .Values.podAnnotations }}
+      annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+    spec:
+      hostNetwork: {{ .Values.hostNetwork }}
+      serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }}
+      {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+        runAsUser: {{ .Values.securityContext.runAsUser }}
+      {{- end }}
+    {{- if .Values.priorityClassName }}
+      priorityClassName: {{ .Values.priorityClassName }}
+    {{- end }}
+      containers:
+      - name: {{ .Chart.Name }}
+{{- if .Values.autosharding.enabled }}
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+{{- end }}
+        args:
+{{  if .Values.collectors.certificatesigningrequests  }}
+        - --collectors=certificatesigningrequests
+{{  end  }}
+{{  if .Values.collectors.configmaps  }}
+        - --collectors=configmaps
+{{  end  }}
+{{  if .Values.collectors.cronjobs  }}
+        - --collectors=cronjobs
+{{  end  }}
+{{ if .Values.collectors.daemonsets  }}
+        - --collectors=daemonsets
+{{  end  }}
+{{  if .Values.collectors.deployments  }}
+        - --collectors=deployments
+{{  end  }}
+{{  if .Values.collectors.endpoints  }}
+        - --collectors=endpoints
+{{  end  }}
+{{  if .Values.collectors.horizontalpodautoscalers  }}
+        - --collectors=horizontalpodautoscalers
+{{  end  }}
+{{  if .Values.collectors.ingresses  }}
+        - --collectors=ingresses
+{{  end  }}
+{{  if .Values.collectors.jobs  }}
+        - --collectors=jobs
+{{  end  }}
+{{  if .Values.collectors.limitranges  }}
+        - --collectors=limitranges
+{{  end  }}
+{{  if .Values.collectors.mutatingwebhookconfigurations  }}
+        - --collectors=mutatingwebhookconfigurations
+{{  end  }}
+{{  if .Values.collectors.namespaces  }}
+        - --collectors=namespaces
+{{  end  }}
+{{  if .Values.collectors.networkpolicies  }}
+        - --collectors=networkpolicies
+{{  end  }}
+{{  if .Values.collectors.nodes  }}
+        - --collectors=nodes
+{{  end  }}
+{{  if .Values.collectors.persistentvolumeclaims  }}
+        - --collectors=persistentvolumeclaims
+{{  end  }}
+{{  if .Values.collectors.persistentvolumes  }}
+        - --collectors=persistentvolumes
+{{  end  }}
+{{  if .Values.collectors.poddisruptionbudgets  }}
+        - --collectors=poddisruptionbudgets
+{{  end  }}
+{{  if .Values.collectors.pods  }}
+        - --collectors=pods
+{{  end  }}
+{{  if .Values.collectors.replicasets  }}
+        - --collectors=replicasets
+{{  end  }}
+{{  if .Values.collectors.replicationcontrollers  }}
+        - --collectors=replicationcontrollers
+{{  end  }}
+{{  if .Values.collectors.resourcequotas  }}
+        - --collectors=resourcequotas
+{{  end  }}
+{{  if .Values.collectors.secrets  }}
+        - --collectors=secrets
+{{  end  }}
+{{  if .Values.collectors.services  }}
+        - --collectors=services
+{{  end  }}
+{{  if .Values.collectors.statefulsets  }}
+        - --collectors=statefulsets
+{{  end  }}
+{{  if .Values.collectors.storageclasses  }}
+        - --collectors=storageclasses
+{{  end  }}
+{{  if .Values.collectors.validatingwebhookconfigurations  }}
+        - --collectors=validatingwebhookconfigurations
+{{  end  }}
+{{  if .Values.collectors.verticalpodautoscalers  }}
+        - --collectors=verticalpodautoscalers
+{{  end  }}
+{{  if .Values.collectors.volumeattachments  }}
+        - --collectors=volumeattachments
+{{  end  }}
+{{ if .Values.namespace }}
+        - --namespace={{ .Values.namespace }}
+{{ end }}
+{{ if .Values.autosharding.enabled }}
+        - --pod=$(POD_NAME)
+        - --pod-namespace=$(POD_NAMESPACE)
+{{ end }}
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        ports:
+        - containerPort: 8080
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+          initialDelaySeconds: 5
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            path: /
+            port: 8080
+          initialDelaySeconds: 5
+          timeoutSeconds: 5
+{{- if .Values.resources }}
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+{{- end }}
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml
new file mode 100644 (file)
index 0000000..aeff117
--- /dev/null
@@ -0,0 +1,39 @@
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "kube-state-metrics.fullname" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Values.podSecurityPolicy.annotations }}
+  annotations:
+{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  volumes:
+    - 'secret'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml
new file mode 100644 (file)
index 0000000..dcd65e1
--- /dev/null
@@ -0,0 +1,17 @@
+{{- if and .Values.podSecurityPolicy.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: psp-{{ template "kube-state-metrics.fullname" . }}
+rules:
+- apiGroups: ['extensions']
+  resources: ['podsecuritypolicies']
+  verbs:     ['use']
+  resourceNames:
+  - {{ template "kube-state-metrics.fullname" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml
new file mode 100644 (file)
index 0000000..a206e64
--- /dev/null
@@ -0,0 +1,19 @@
+{{- if and .Values.podSecurityPolicy.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: psp-{{ template "kube-state-metrics.fullname" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp-{{ template "kube-state-metrics.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "kube-state-metrics.fullname" . }}
+    namespace: {{ template "kube-state-metrics.namespace" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/service.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/service.yaml
new file mode 100644 (file)
index 0000000..5dacf52
--- /dev/null
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app.kubernetes.io/instance: "{{ .Release.Name }}"
+    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 4 }}
+{{- end }}
+  annotations:
+    {{- if .Values.prometheusScrape }}
+    prometheus.io/scrape: '{{ .Values.prometheusScrape }}'
+    {{- end }}
+    {{- if .Values.service.annotations }}
+    {{- toYaml .Values.service.annotations | nindent 4 }}
+    {{- end }}
+spec:
+  type: "{{ .Values.service.type }}"
+  ports:
+  - name: "http"
+    protocol: TCP
+    port: {{ .Values.service.port }}
+  {{- if .Values.service.nodePort }}
+    nodePort: {{ .Values.service.nodePort }}
+  {{- end }}
+    targetPort: 8080
+{{- if .Values.service.loadBalancerIP }}
+  loadBalancerIP: "{{ .Values.service.loadBalancerIP }}"
+{{- end }}
+  selector:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml
new file mode 100644 (file)
index 0000000..32bb164
--- /dev/null
@@ -0,0 +1,14 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+imagePullSecrets:
+{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml
new file mode 100644 (file)
index 0000000..54cde36
--- /dev/null
@@ -0,0 +1,25 @@
+{{- if .Values.prometheus.monitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app.kubernetes.io/instance: "{{ .Release.Name }}"
+    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
+    {{- if .Values.prometheus.monitor.additionalLabels }}
+{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  endpoints:
+    - port: http
+      {{- if .Values.prometheus.monitor.honorLabels }}
+      honorLabels: true
+      {{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml
new file mode 100644 (file)
index 0000000..bf53960
--- /dev/null
@@ -0,0 +1,27 @@
+{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+- apiGroups:
+  - apps
+  resourceNames:
+  - kube-state-metrics
+  resources:
+  - statefulsets
+  verbs:
+  - get
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml
new file mode 100644 (file)
index 0000000..6a2e5bf
--- /dev/null
@@ -0,0 +1,20 @@
+{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  labels:
+    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
+    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "kube-state-metrics.fullname" . }}
+    namespace: {{ template "kube-state-metrics.namespace" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/values.yaml b/helm/infrastructure/subcharts/prometheus/charts/kube-state-metrics/values.yaml
new file mode 100644 (file)
index 0000000..57c9255
--- /dev/null
@@ -0,0 +1,126 @@
+# Default values for kube-state-metrics.
+prometheusScrape: true
+image:
+  repository: quay.io/coreos/kube-state-metrics
+  tag: v1.9.5
+  pullPolicy: IfNotPresent
+
+# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data
+# will be automatically sharded across <.Values.replicas> pods using the built-in
+# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding
+# This is an experimental feature and there are no stability guarantees.
+autosharding:
+  enabled: false
+
+replicas: 1
+
+service:
+  port: 8080
+  # Default to clusterIP for backward compatibility
+  type: ClusterIP
+  nodePort: 0
+  loadBalancerIP: ""
+  annotations: {}
+
+customLabels: {}
+
+hostNetwork: false
+
+rbac:
+  # If true, create & use RBAC resources
+  create: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created; requires rbac.create to be true
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:
+  # Reference to one or more secrets to be used when pulling images
+  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  imagePullSecrets: []
+
+prometheus:
+  monitor:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    honorLabels: false
+
+## Specify if a Pod Security Policy for kube-state-metrics must be created
+## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+##
+podSecurityPolicy:
+  enabled: false
+  annotations: {}
+    ## Specify pod annotations
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+    ##
+    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+
+securityContext:
+  enabled: true
+  runAsUser: 65534
+  fsGroup: 65534
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+## Affinity settings for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+affinity: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Annotations to be added to the pod
+podAnnotations: {}
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName: ""
+
+# Available collectors for kube-state-metrics. Most collectors are enabled by
+# default; see the flags below for the exceptions.
+collectors:
+  certificatesigningrequests: true
+  configmaps: true
+  cronjobs: true
+  daemonsets: true
+  deployments: true
+  endpoints: true
+  horizontalpodautoscalers: true
+  ingresses: true
+  jobs: true
+  limitranges: true
+  mutatingwebhookconfigurations: false
+  namespaces: true
+  networkpolicies: false
+  nodes: true
+  persistentvolumeclaims: true
+  persistentvolumes: true
+  poddisruptionbudgets: true
+  pods: true
+  replicasets: true
+  replicationcontrollers: true
+  resourcequotas: true
+  secrets: true
+  services: true
+  statefulsets: true
+  storageclasses: true
+  validatingwebhookconfigurations: false
+  verticalpodautoscalers: false
+  volumeattachments: false
+
+# Namespace to collect resources from. By default resources in all namespaces are collected.
+# namespace: ""
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
diff --git a/helm/infrastructure/subcharts/prometheus/requirements.yaml b/helm/infrastructure/subcharts/prometheus/requirements.yaml
new file mode 100644 (file)
index 0000000..6e079ae
--- /dev/null
@@ -0,0 +1,7 @@
+dependencies:
+
+  - name: kube-state-metrics
+    version: "2.7.*"
+    repository: https://kubernetes-charts.storage.googleapis.com/
+    condition: kubeStateMetrics.enabled
+
diff --git a/helm/infrastructure/subcharts/prometheus/templates/NOTES.txt b/helm/infrastructure/subcharts/prometheus/templates/NOTES.txt
new file mode 100644 (file)
index 0000000..0e8868f
--- /dev/null
@@ -0,0 +1,112 @@
+{{- if .Values.server.enabled -}}
+The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.server.ingress.enabled -}}
+From outside the cluster, the server URL(s) are:
+{{- range .Values.server.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Prometheus server URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.server.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.server.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
+{{- else if contains "ClusterIP"  .Values.server.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090
+{{- end }}
+{{- end }}
+
+{{- if .Values.server.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Server pod is terminated.                             #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{ if .Values.alertmanager.enabled }}
+The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.alertmanager.ingress.enabled -}}
+From outside the cluster, the alertmanager URL(s) are:
+{{- range .Values.alertmanager.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Alertmanager URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.alertmanager.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }}
+{{- else if contains "ClusterIP"  .Values.alertmanager.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
+{{- end }}
+{{- end }}
+
+{{- if .Values.alertmanager.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the AlertManager pod is terminated.                       #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{- if .Values.nodeExporter.podSecurityPolicy.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Pod Security Policy has been moved to a global property.  #####
+######            use .Values.podSecurityPolicy.enabled with pod-based      #####
+######            annotations                                               #####
+######            (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) #####
+#################################################################################
+{{- end }}
+
+{{ if .Values.pushgateway.enabled }}
+The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.pushgateway.ingress.enabled -}}
+From outside the cluster, the pushgateway URL(s) are:
+{{- range .Values.pushgateway.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the PushGateway URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.pushgateway.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }}
+{{- else if contains "ClusterIP"  .Values.pushgateway.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091
+{{- end }}
+{{- end }}
+{{- end }}
+
+For more information on running Prometheus, visit:
+https://prometheus.io/
diff --git a/helm/infrastructure/subcharts/prometheus/templates/_helpers.tpl b/helm/infrastructure/subcharts/prometheus/templates/_helpers.tpl
new file mode 100644 (file)
index 0000000..f2e0329
--- /dev/null
@@ -0,0 +1,249 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create unified labels for prometheus components
+*/}}
+{{- define "prometheus.common.matchLabels" -}}
+app: {{ template "prometheus.name" . }}
+release: {{ .Release.Name }}
+{{- end -}}
+
+{{- define "prometheus.common.metaLabels" -}}
+chart: {{ template "prometheus.chart" . }}
+heritage: {{ .Release.Service }}
+{{- end -}}
+
+{{- define "prometheus.alertmanager.labels" -}}
+{{ include "prometheus.alertmanager.matchLabels" . }}
+{{ include "prometheus.common.metaLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.alertmanager.matchLabels" -}}
+component: {{ .Values.alertmanager.name | quote }}
+{{ include "prometheus.common.matchLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.nodeExporter.labels" -}}
+{{ include "prometheus.nodeExporter.matchLabels" . }}
+{{ include "prometheus.common.metaLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.nodeExporter.matchLabels" -}}
+component: {{ .Values.nodeExporter.name | quote }}
+{{ include "prometheus.common.matchLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.pushgateway.labels" -}}
+{{ include "prometheus.pushgateway.matchLabels" . }}
+{{ include "prometheus.common.metaLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.pushgateway.matchLabels" -}}
+component: {{ .Values.pushgateway.name | quote }}
+{{ include "prometheus.common.matchLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.server.labels" -}}
+{{ include "prometheus.server.matchLabels" . }}
+{{ include "prometheus.common.metaLabels" . }}
+{{- end -}}
+
+{{- define "prometheus.server.matchLabels" -}}
+component: {{ .Values.server.name | quote }}
+{{ include "prometheus.common.matchLabels" . }}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified alertmanager name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+
+{{- define "prometheus.alertmanager.fullname" -}}
+{{- if .Values.alertmanager.fullnameOverride -}}
+{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified node-exporter name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.nodeExporter.fullname" -}}
+{{- if .Values.nodeExporter.fullnameOverride -}}
+{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified Prometheus server name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.server.fullname" -}}
+{{- if .Values.server.fullnameOverride -}}
+{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified pushgateway name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "prometheus.pushgateway.fullname" -}}
+{{- if .Values.pushgateway.fullnameOverride -}}
+{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "prometheus.deployment.apiVersion" -}}
+{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+{{/*
+Return the appropriate apiVersion for daemonset.
+*/}}
+{{- define "prometheus.daemonset.apiVersion" -}}
+{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "prometheus.networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+{{/*
+Return the appropriate apiVersion for podsecuritypolicy.
+*/}}
+{{- define "prometheus.podSecurityPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.3-0, <1.10-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "^1.10-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "policy/v1beta1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the alertmanager component
+*/}}
+{{- define "prometheus.serviceAccountName.alertmanager" -}}
+{{- if .Values.serviceAccounts.alertmanager.create -}}
+    {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccounts.alertmanager.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the nodeExporter component
+*/}}
+{{- define "prometheus.serviceAccountName.nodeExporter" -}}
+{{- if .Values.serviceAccounts.nodeExporter.create -}}
+    {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccounts.nodeExporter.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the pushgateway component
+*/}}
+{{- define "prometheus.serviceAccountName.pushgateway" -}}
+{{- if .Values.serviceAccounts.pushgateway.create -}}
+    {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccounts.pushgateway.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use for the server component
+*/}}
+{{- define "prometheus.serviceAccountName.server" -}}
+{{- if .Values.serviceAccounts.server.create -}}
+    {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccounts.server.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set
+*/}}
+{{- define "prometheus.namespace" -}}
+{{- if .Values.forceNamespace -}}
+{{ printf "namespace: %s" .Values.forceNamespace }}
+{{- else -}}
+{{ printf "namespace: %s" .Release.Namespace }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrole.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrole.yaml
new file mode 100644 (file)
index 0000000..3cfc133
--- /dev/null
@@ -0,0 +1,21 @@
+{{- if and .Values.alertmanager.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+rules:
+{{- if .Values.podSecurityPolicy.enabled }}
+  - apiGroups:
+    - extensions
+    resources:
+    - podsecuritypolicies
+    verbs:
+    - use
+    resourceNames:
+    - {{ template "prometheus.alertmanager.fullname" . }}
+{{- else }}
+  []
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrolebinding.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-clusterrolebinding.yaml
new file mode 100644 (file)
index 0000000..03102fb
--- /dev/null
@@ -0,0 +1,16 @@
+{{- if and .Values.alertmanager.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "prometheus.serviceAccountName.alertmanager" . }}
+{{ include "prometheus.namespace" . | indent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-configmap.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-configmap.yaml
new file mode 100644 (file)
index 0000000..cb09bf0
--- /dev/null
@@ -0,0 +1,19 @@
+{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+data:
+{{- $root := . -}}
+{{- range $key, $value := .Values.alertmanagerFiles }}
+  {{- if $key | regexMatch ".*\\.ya?ml$" }}
+  {{ $key }}: |
+{{ toYaml $value | default "{}" | indent 4 }}
+  {{- else }}
+  {{ $key }}: {{ toYaml $value | indent 4 }}
+  {{- end }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-deployment.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-deployment.yaml
new file mode 100644 (file)
index 0000000..bb13057
--- /dev/null
@@ -0,0 +1,145 @@
+{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}}
+apiVersion: {{ template "prometheus.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }}
+  replicas: {{ .Values.alertmanager.replicaCount }}
+  {{- if .Values.alertmanager.strategy }}
+  strategy:
+{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }}
+    {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }}
+{{- end }}
+  template:
+    metadata:
+    {{- if .Values.alertmanager.podAnnotations }}
+      annotations:
+{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.alertmanager.labels" . | nindent 8 }}
+        {{- if .Values.alertmanager.podLabels}}
+        {{ toYaml .Values.alertmanager.podLabels | nindent 8 }}
+        {{- end}}
+    spec:
+{{- if .Values.alertmanager.schedulerName }}
+      schedulerName: "{{ .Values.alertmanager.schedulerName }}"
+{{- end }}
+      serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }}
+      {{- if .Values.alertmanager.extraInitContainers }}
+      initContainers:
+{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }}
+      {{- end }}
+{{- if .Values.alertmanager.priorityClassName }}
+      priorityClassName: "{{ .Values.alertmanager.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}
+          image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}"
+          imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}"
+          env:
+            {{- range $key, $value := .Values.alertmanager.extraEnv }}
+            - name: {{ $key }}
+              value: {{ $value }}
+            {{- end }}
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: status.podIP
+          args:
+            - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }}
+            - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }}
+            - --cluster.advertise-address=$(POD_IP):6783
+          {{- range $key, $value := .Values.alertmanager.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          {{- if .Values.alertmanager.baseURL }}
+            - --web.external-url={{ .Values.alertmanager.baseURL }}
+          {{- end }}
+
+          ports:
+            - containerPort: 9093
+          readinessProbe:
+            httpGet:
+              path: {{ .Values.alertmanager.prefixURL }}/-/ready
+              port: 9093
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          resources:
+{{ toYaml .Values.alertmanager.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}"
+              subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}"
+          {{- range .Values.alertmanager.extraSecretMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+
+        {{- if .Values.configmapReload.alertmanager.enabled }}
+        - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }}
+          image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}"
+          imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload
+          resources:
+{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+        {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.alertmanager.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.alertmanager.securityContext }}
+      securityContext:
+{{ toYaml .Values.alertmanager.securityContext | indent 8 }}
+    {{- end }}
+    {{- if .Values.alertmanager.tolerations }}
+      tolerations:
+{{ toYaml .Values.alertmanager.tolerations | indent 8 }}
+    {{- end }}
+    {{- if .Values.alertmanager.affinity }}
+      affinity:
+{{ toYaml .Values.alertmanager.affinity | indent 8 }}
+    {{- end }}
+      volumes:
+        - name: config-volume
+          {{- if empty .Values.alertmanager.configFromSecret }}
+          configMap:
+            name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }}
+          {{- else }}
+          secret:
+            secretName: {{ .Values.alertmanager.configFromSecret }}
+          {{- end }}
+      {{- range .Values.alertmanager.extraSecretMounts }}
+        - name: {{ .name }}
+          secret:
+            secretName: {{ .secretName }}
+      {{- end }}
+        - name: storage-volume
+        {{- if .Values.alertmanager.persistentVolume.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }}
+        {{- else }}
+          emptyDir: {}
+        {{- end -}}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-ingress.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-ingress.yaml
new file mode 100644 (file)
index 0000000..b199b70
--- /dev/null
@@ -0,0 +1,43 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}}
+{{- $releaseName := .Release.Name -}}
+{{- $serviceName := include "prometheus.alertmanager.fullname" . }}
+{{- $servicePort := .Values.alertmanager.service.servicePort -}}
+{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}}
+{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+apiVersion: networking.k8s.io/v1beta1
+{{ else }}
+apiVersion: extensions/v1beta1
+{{ end -}}
+kind: Ingress
+metadata:
+{{- if .Values.alertmanager.ingress.annotations }}
+  annotations:
+{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }}
+    {{ $key }}: {{ $value }}
+{{- end }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  rules:
+  {{- range .Values.alertmanager.ingress.hosts }}
+    {{- $url := splitList "/" . }}
+    - host: {{ first $url }}
+      http:
+        paths:
+{{ if $extraPaths }}
+{{ toYaml $extraPaths | indent 10 }}
+{{- end }}
+          - path: /{{ rest $url | join "/" }}
+            backend:
+              serviceName: {{ $serviceName }}
+              servicePort: {{ $servicePort }}
+  {{- end -}}
+{{- if .Values.alertmanager.ingress.tls }}
+  tls:
+{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-networkpolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-networkpolicy.yaml
new file mode 100644 (file)
index 0000000..e44ade6
--- /dev/null
@@ -0,0 +1,20 @@
+{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}}
+apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }}
+  ingress:
+    - from:
+      - podSelector:
+          matchLabels:
+            {{- include "prometheus.server.matchLabels" . | nindent 12 }}
+    - ports:
+      - port: 9093
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-pdb.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-pdb.yaml
new file mode 100644 (file)
index 0000000..41a92f3
--- /dev/null
@@ -0,0 +1,14 @@
+{{- if .Values.alertmanager.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+spec:
+  maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }}
+  selector:
+    matchLabels:
+      {{- include "prometheus.alertmanager.labels" . | nindent 6 }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-podsecuritypolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-podsecuritypolicy.yaml
new file mode 100644 (file)
index 0000000..70f8033
--- /dev/null
@@ -0,0 +1,48 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  annotations:
+{{- if .Values.alertmanager.podSecurityPolicy.annotations }}
+{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  requiredDropCapabilities:
+    - ALL
+  volumes:
+    - 'configMap'
+    - 'persistentVolumeClaim'
+    - 'emptyDir'
+    - 'secret'
+  allowedHostPaths:
+    - pathPrefix: /etc
+      readOnly: true
+    - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }}
+  hostNetwork: false
+  hostPID: false
+  hostIPC: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: true
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-pvc.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-pvc.yaml
new file mode 100644 (file)
index 0000000..4fdab16
--- /dev/null
@@ -0,0 +1,33 @@
+{{- if not .Values.alertmanager.statefulSet.enabled -}}
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}}
+{{- if not .Values.alertmanager.persistentVolume.existingClaim -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  {{- if .Values.alertmanager.persistentVolume.annotations }}
+  annotations:
+{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }}
+  {{- end }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  accessModes:
+{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }}
+{{- if .Values.alertmanager.persistentVolume.storageClass }}
+{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}"
+{{- end }}
+{{- end }}
+{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }}
+  volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}"
+{{- end }}
+  resources:
+    requests:
+      storage: "{{ .Values.alertmanager.persistentVolume.size }}"
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-service-headless.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-service-headless.yaml
new file mode 100644 (file)
index 0000000..8c402c4
--- /dev/null
@@ -0,0 +1,31 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.alertmanager.statefulSet.headless.annotations }}
+  annotations:
+{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+{{- if .Values.alertmanager.statefulSet.headless.labels }}
+{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }}
+{{- end }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}-headless
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  clusterIP: None
+  ports:
+    - name: http
+      port: {{ .Values.alertmanager.statefulSet.headless.servicePort }}
+      protocol: TCP
+      targetPort: 9093
+{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }}
+    - name: meshpeer
+      port: 6783
+      protocol: TCP
+      targetPort: 6783
+{{- end }}
+  selector:
+    {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-service.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-service.yaml
new file mode 100644 (file)
index 0000000..9edc9ac
--- /dev/null
@@ -0,0 +1,53 @@
+{{- if .Values.alertmanager.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.alertmanager.service.annotations }}
+  annotations:
+{{ toYaml .Values.alertmanager.service.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+{{- if .Values.alertmanager.service.labels }}
+{{ toYaml .Values.alertmanager.service.labels | indent 4 }}
+{{- end }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+{{- if .Values.alertmanager.service.clusterIP }}
+  clusterIP: {{ .Values.alertmanager.service.clusterIP }}
+{{- end }}
+{{- if .Values.alertmanager.service.externalIPs }}
+  externalIPs:
+{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.alertmanager.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }}
+    - {{ $cidr }}
+  {{- end }}
+{{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.alertmanager.service.servicePort }}
+      protocol: TCP
+      targetPort: 9093
+    {{- if .Values.alertmanager.service.nodePort }}
+      nodePort: {{ .Values.alertmanager.service.nodePort }}
+    {{- end }}
+{{- if .Values.alertmanager.service.enableMeshPeer }}
+    - name: meshpeer
+      port: 6783
+      protocol: TCP
+      targetPort: 6783
+{{- end }}
+  selector:
+    {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }}
+{{- if .Values.alertmanager.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }}
+{{- end }}
+  type: "{{ .Values.alertmanager.service.type }}"
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-serviceaccount.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-serviceaccount.yaml
new file mode 100644 (file)
index 0000000..a5d996a
--- /dev/null
@@ -0,0 +1,11 @@
+{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.serviceAccountName.alertmanager" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/alertmanager-statefulset.yaml b/helm/infrastructure/subcharts/prometheus/templates/alertmanager-statefulset.yaml
new file mode 100644 (file)
index 0000000..e2e5c91
--- /dev/null
@@ -0,0 +1,155 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels:
+    {{- include "prometheus.alertmanager.labels" . | nindent 4 }}
+  name: {{ template "prometheus.alertmanager.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless
+  selector:
+    matchLabels:
+      {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }}
+  replicas: {{ .Values.alertmanager.replicaCount }}
+  podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }}
+  template:
+    metadata:
+    {{- if .Values.alertmanager.podAnnotations }}
+      annotations:
+{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.alertmanager.labels" . | nindent 8 }}
+    spec:
+{{- if .Values.alertmanager.affinity }}
+      affinity:
+{{ toYaml .Values.alertmanager.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.alertmanager.schedulerName }}
+      schedulerName: "{{ .Values.alertmanager.schedulerName }}"
+{{- end }}
+      serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }}
+{{- if .Values.alertmanager.priorityClassName }}
+      priorityClassName: "{{ .Values.alertmanager.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}
+          image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}"
+          imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}"
+          env:
+            {{- range $key, $value := .Values.alertmanager.extraEnv }}
+            - name: {{ $key }}
+              value: {{ $value }}
+            {{- end }}
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: status.podIP
+          args:
+            - --config.file=/etc/config/alertmanager.yml
+            - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }}
+          {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }}
+            - --cluster.advertise-address=$(POD_IP):6783
+            - --cluster.listen-address=0.0.0.0:6783
+          {{- range $n := until (.Values.alertmanager.replicaCount | int) }}
+            - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783
+          {{- end }}
+          {{- else }}
+            - --cluster.listen-address=
+          {{- end }}
+          {{- range $key, $value := .Values.alertmanager.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          {{- if .Values.alertmanager.baseURL }}
+            - --web.external-url={{ .Values.alertmanager.baseURL }}
+          {{- end }}
+
+          ports:
+            - containerPort: 9093
+          readinessProbe:
+            httpGet:
+              path: {{ .Values.alertmanager.prefixURL }}/#/status
+              port: 9093
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          resources:
+{{ toYaml .Values.alertmanager.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}"
+              subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}"
+          {{- range .Values.alertmanager.extraSecretMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+        {{- if .Values.configmapReload.alertmanager.enabled }}
+        - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }}
+          image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}"
+          imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload
+          resources:
+{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+        {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.alertmanager.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.alertmanager.securityContext }}
+      securityContext:
+{{ toYaml .Values.alertmanager.securityContext | indent 8 }}
+    {{- end }}
+    {{- if .Values.alertmanager.tolerations }}
+      tolerations:
+{{ toYaml .Values.alertmanager.tolerations | indent 8 }}
+    {{- end }}
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }}
+      {{- range .Values.alertmanager.extraSecretMounts }}
+        - name: {{ .name }}
+          secret:
+            secretName: {{ .secretName }}
+      {{- end }}
+{{- if .Values.alertmanager.persistentVolume.enabled }}
+  volumeClaimTemplates:
+    - metadata:
+        name: storage-volume
+        {{- if .Values.alertmanager.persistentVolume.annotations }}
+        annotations:
+{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }}
+        resources:
+          requests:
+            storage: "{{ .Values.alertmanager.persistentVolume.size }}"
+      {{- if .Values.alertmanager.persistentVolume.storageClass }}
+      {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }}
+        storageClassName: ""
+      {{- else }}
+        storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}"
+      {{- end }}
+      {{- end }}
+{{- else }}
+        - name: storage-volume
+          emptyDir: {}
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-daemonset.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-daemonset.yaml
new file mode 100644 (file)
index 0000000..4d6e333
--- /dev/null
@@ -0,0 +1,121 @@
+{{- if .Values.nodeExporter.enabled -}}
+apiVersion: {{ template "prometheus.daemonset.apiVersion" . }}
+kind: DaemonSet
+metadata:
+{{- if .Values.nodeExporter.deploymentAnnotations }}
+  annotations:
+{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }}
+  {{- if .Values.nodeExporter.updateStrategy }}
+  updateStrategy:
+{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }}
+  {{- end }}
+  template:
+    metadata:
+    {{- if .Values.nodeExporter.podAnnotations }}
+      annotations:
+{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.nodeExporter.labels" . | nindent 8 }}
+{{- if .Values.nodeExporter.pod.labels }}
+{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }}
+{{- end }}
+    spec:
+      serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
+      {{- if .Values.nodeExporter.extraInitContainers }}
+      initContainers:
+{{ toYaml .Values.nodeExporter.extraInitContainers | indent 8 }}
+      {{- end }}
+{{- if .Values.nodeExporter.priorityClassName }}
+      priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "prometheus.name" . }}-{{ .Values.nodeExporter.name }}
+          image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}"
+          imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}"
+          args:
+            - --path.procfs=/host/proc
+            - --path.sysfs=/host/sys
+          {{- range $key, $value := .Values.nodeExporter.extraArgs }}
+          {{- if $value }}
+            - --{{ $key }}={{ $value }}
+          {{- else }}
+            - --{{ $key }}
+          {{- end }}
+          {{- end }}
+          ports:
+            - name: metrics
+              containerPort: 9100
+              hostPort: {{ .Values.nodeExporter.service.hostPort }}
+          resources:
+{{ toYaml .Values.nodeExporter.resources | indent 12 }}
+          volumeMounts:
+            - name: proc
+              mountPath: /host/proc
+              readOnly: true
+            - name: sys
+              mountPath: /host/sys
+              readOnly: true
+          {{- range .Values.nodeExporter.extraHostPathMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              readOnly: {{ .readOnly }}
+            {{- if .mountPropagation }}
+              mountPropagation: {{ .mountPropagation }}
+            {{- end }}
+          {{- end }}
+          {{- range .Values.nodeExporter.extraConfigmapMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.nodeExporter.hostNetwork }}
+      hostNetwork: true
+    {{- end }}
+    {{- if .Values.nodeExporter.hostPID }}
+      hostPID: true
+    {{- end }}
+    {{- if .Values.nodeExporter.tolerations }}
+      tolerations:
+{{ toYaml .Values.nodeExporter.tolerations | indent 8 }}
+    {{- end }}
+    {{- if .Values.nodeExporter.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.nodeExporter.securityContext }}
+      securityContext:
+{{ toYaml .Values.nodeExporter.securityContext | indent 8 }}
+    {{- end }}
+      volumes:
+        - name: proc
+          hostPath:
+            path: /proc
+        - name: sys
+          hostPath:
+            path: /sys
+      {{- range .Values.nodeExporter.extraHostPathMounts }}
+        - name: {{ .name }}
+          hostPath:
+            path: {{ .hostPath }}
+      {{- end }}
+      {{- range .Values.nodeExporter.extraConfigmapMounts }}
+        - name: {{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-podsecuritypolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-podsecuritypolicy.yaml
new file mode 100644 (file)
index 0000000..825794b
--- /dev/null
@@ -0,0 +1,55 @@
+{{- if and .Values.nodeExporter.enabled .Values.rbac.create }}
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+  annotations:
+{{- if .Values.nodeExporter.podSecurityPolicy.annotations }}
+{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  requiredDropCapabilities:
+    - ALL
+  volumes:
+    - 'configMap'
+    - 'hostPath'
+    - 'secret'
+  allowedHostPaths:
+    - pathPrefix: /proc
+      readOnly: true
+    - pathPrefix: /sys
+      readOnly: true
+  {{- range .Values.nodeExporter.extraHostPathMounts }}
+    - pathPrefix: {{ .hostPath }}
+      readOnly: {{ .readOnly }}
+  {{- end }}
+  hostNetwork: {{ .Values.nodeExporter.hostNetwork }}
+  hostPID: {{ .Values.nodeExporter.hostPID }}
+  hostIPC: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+  hostPorts:
+    - min: 1
+      max: 65535
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-role.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-role.yaml
new file mode 100644 (file)
index 0000000..a6134a2
--- /dev/null
@@ -0,0 +1,17 @@
+{{- if and .Values.nodeExporter.enabled .Values.rbac.create }}
+{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+{{ include "prometheus.namespace" . | indent 2 }}
+rules:
+- apiGroups: ['extensions']
+  resources: ['podsecuritypolicies']
+  verbs:     ['use']
+  resourceNames:
+  - {{ template "prometheus.nodeExporter.fullname" . }}
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-rolebinding.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-rolebinding.yaml
new file mode 100644 (file)
index 0000000..42c90d0
--- /dev/null
@@ -0,0 +1,19 @@
+{{- if and .Values.nodeExporter.enabled .Values.rbac.create }}
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+{{ include "prometheus.namespace" . | indent 2 }}
+roleRef:
+  kind: Role
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-service.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-service.yaml
new file mode 100644 (file)
index 0000000..b1833da
--- /dev/null
@@ -0,0 +1,41 @@
+{{- if .Values.nodeExporter.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.nodeExporter.service.annotations }}
+  annotations:
+{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+{{- if .Values.nodeExporter.service.labels }}
+{{ toYaml .Values.nodeExporter.service.labels | indent 4 }}
+{{- end }}
+  name: {{ template "prometheus.nodeExporter.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+{{- if .Values.nodeExporter.service.clusterIP }}
+  clusterIP: {{ .Values.nodeExporter.service.clusterIP }}
+{{- end }}
+{{- if .Values.nodeExporter.service.externalIPs }}
+  externalIPs:
+{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.nodeExporter.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }}
+    - {{ $cidr }}
+  {{- end }}
+{{- end }}
+  ports:
+    - name: metrics
+      port: {{ .Values.nodeExporter.service.servicePort }}
+      protocol: TCP
+      targetPort: 9100
+  selector:
+    {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }}
+  type: "{{ .Values.nodeExporter.service.type }}"
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/node-exporter-serviceaccount.yaml b/helm/infrastructure/subcharts/prometheus/templates/node-exporter-serviceaccount.yaml
new file mode 100644 (file)
index 0000000..0cf91af
--- /dev/null
@@ -0,0 +1,11 @@
+{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "prometheus.nodeExporter.labels" . | nindent 4 }}
+  name: {{ template "prometheus.serviceAccountName.nodeExporter" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.nodeExporter.annotations | indent 4 }}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrole.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrole.yaml
new file mode 100644 (file)
index 0000000..f4393c9
--- /dev/null
@@ -0,0 +1,21 @@
+{{- if and .Values.pushgateway.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+rules:
+{{- if .Values.podSecurityPolicy.enabled }}
+  - apiGroups:
+    - extensions
+    resources:
+    - podsecuritypolicies
+    verbs:
+    - use
+    resourceNames:
+    - {{ template "prometheus.pushgateway.fullname" . }}
+{{- else }}
+  []
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrolebinding.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-clusterrolebinding.yaml
new file mode 100644 (file)
index 0000000..afdaaf1
--- /dev/null
@@ -0,0 +1,16 @@
+{{- if and .Values.pushgateway.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "prometheus.serviceAccountName.pushgateway" . }}
+{{ include "prometheus.namespace" . | indent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-deployment.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-deployment.yaml
new file mode 100644 (file)
index 0000000..a9afc8a
--- /dev/null
@@ -0,0 +1,103 @@
+{{- if .Values.pushgateway.enabled -}}
+apiVersion: {{ template "prometheus.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }}
+  replicas: {{ .Values.pushgateway.replicaCount }}
+  {{- if .Values.pushgateway.strategy }}
+  strategy:
+{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }}
+    {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }}
+{{- end }}
+  template:
+    metadata:
+    {{- if .Values.pushgateway.podAnnotations }}
+      annotations:
+{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.pushgateway.labels" . | nindent 8 }}
+    spec:
+{{- if .Values.schedulerName }}
+      schedulerName: "{{ .Values.schedulerName }}"
+{{- end }}
+      serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }}
+      {{- if .Values.pushgateway.extraInitContainers }}
+      initContainers:
+{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }}
+      {{- end }}
+{{- if .Values.pushgateway.priorityClassName }}
+      priorityClassName: "{{ .Values.pushgateway.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }}
+          image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}"
+          imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}"
+          args:
+          {{- range $key, $value := .Values.pushgateway.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          ports:
+            - containerPort: 9091
+          livenessProbe:
+            httpGet:
+            {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }}
+              path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy
+            {{- else }}
+              path: /-/healthy
+            {{- end }}
+              port: 9091
+            initialDelaySeconds: 10
+            timeoutSeconds: 10
+          readinessProbe:
+            httpGet:
+            {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }}
+              path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready
+            {{- else }}
+              path: /-/ready
+            {{- end }}
+              port: 9091
+            initialDelaySeconds: 10
+            timeoutSeconds: 10
+          resources:
+{{ toYaml .Values.pushgateway.resources | indent 12 }}
+          {{- if .Values.pushgateway.persistentVolume.enabled }}
+          volumeMounts:
+            - name: storage-volume
+              mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}"
+              subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}"
+          {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+      {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.pushgateway.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.pushgateway.securityContext }}
+      securityContext:
+{{ toYaml .Values.pushgateway.securityContext | indent 8 }}
+    {{- end }}
+    {{- if .Values.pushgateway.tolerations }}
+      tolerations:
+{{ toYaml .Values.pushgateway.tolerations | indent 8 }}
+    {{- end }}
+    {{- if .Values.pushgateway.affinity }}
+      affinity:
+{{ toYaml .Values.pushgateway.affinity | indent 8 }}
+    {{- end }}
+      {{- if .Values.pushgateway.persistentVolume.enabled }}
+      volumes:
+        - name: storage-volume
+          persistentVolumeClaim:
+            claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }}
+      {{- end -}}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-ingress.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-ingress.yaml
new file mode 100644 (file)
index 0000000..0c877d5
--- /dev/null
@@ -0,0 +1,40 @@
+{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}}
+{{- $releaseName := .Release.Name -}}
+{{- $serviceName := include "prometheus.pushgateway.fullname" . }}
+{{- $servicePort := .Values.pushgateway.service.servicePort -}}
+{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}}
+{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+apiVersion: networking.k8s.io/v1beta1
+{{ else }}
+apiVersion: extensions/v1beta1
+{{ end -}}
+kind: Ingress
+metadata:
+{{- if .Values.pushgateway.ingress.annotations }}
+  annotations:
+{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}}
+{{- end }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  rules:
+  {{- range .Values.pushgateway.ingress.hosts }}
+    {{- $url := splitList "/" . }}
+    - host: {{ first $url }}
+      http:
+        paths:
+{{ if $extraPaths }}
+{{ toYaml $extraPaths | indent 10 }}
+{{- end }}
+          - path: /{{ rest $url | join "/" }}
+            backend:
+              serviceName: {{ $serviceName }}
+              servicePort: {{ $servicePort }}
+  {{- end -}}
+{{- if .Values.pushgateway.ingress.tls }}
+  tls:
+{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-networkpolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-networkpolicy.yaml
new file mode 100644 (file)
index 0000000..c8d1fb3
--- /dev/null
@@ -0,0 +1,20 @@
+{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}}
+apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }}
+  ingress:
+    - from:
+      - podSelector:
+          matchLabels:
+            {{- include "prometheus.server.matchLabels" . | nindent 12 }}
+    - ports:
+      - port: 9091
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-pdb.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-pdb.yaml
new file mode 100644 (file)
index 0000000..50beb48
--- /dev/null
@@ -0,0 +1,14 @@
+{{- if .Values.pushgateway.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+spec:
+  maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }}
+  selector:
+    matchLabels:
+      {{- include "prometheus.pushgateway.labels" . | nindent 6 }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-podsecuritypolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-podsecuritypolicy.yaml
new file mode 100644 (file)
index 0000000..dd3829d
--- /dev/null
@@ -0,0 +1,44 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  annotations:
+{{- if .Values.pushgateway.podSecurityPolicy.annotations }}
+{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  requiredDropCapabilities:
+    - ALL
+  volumes:
+    - 'persistentVolumeClaim'
+    - 'secret'
+  allowedHostPaths:
+    - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }}
+  hostNetwork: false
+  hostPID: false
+  hostIPC: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: true
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-pvc.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-pvc.yaml
new file mode 100644 (file)
index 0000000..227e7a9
--- /dev/null
@@ -0,0 +1,31 @@
+{{- if .Values.pushgateway.persistentVolume.enabled -}}
+{{- if not .Values.pushgateway.persistentVolume.existingClaim -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  {{- if .Values.pushgateway.persistentVolume.annotations }}
+  annotations:
+{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }}
+  {{- end }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  accessModes:
+{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }}
+{{- if .Values.pushgateway.persistentVolume.storageClass }}
+{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}"
+{{- end }}
+{{- end }}
+{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }}
+  volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}"
+{{- end }}
+  resources:
+    requests:
+      storage: "{{ .Values.pushgateway.persistentVolume.size }}"
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-service.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-service.yaml
new file mode 100644 (file)
index 0000000..f05f17c
--- /dev/null
@@ -0,0 +1,41 @@
+{{- if .Values.pushgateway.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.pushgateway.service.annotations }}
+  annotations:
+{{ toYaml .Values.pushgateway.service.annotations | indent 4}}
+{{- end }}
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+{{- if .Values.pushgateway.service.labels }}
+{{ toYaml .Values.pushgateway.service.labels | indent 4}}
+{{- end }}
+  name: {{ template "prometheus.pushgateway.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+{{- if .Values.pushgateway.service.clusterIP }}
+  clusterIP: {{ .Values.pushgateway.service.clusterIP }}
+{{- end }}
+{{- if .Values.pushgateway.service.externalIPs }}
+  externalIPs:
+{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.pushgateway.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.pushgateway.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }}
+    - {{ $cidr }}
+  {{- end }}
+{{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.pushgateway.service.servicePort }}
+      protocol: TCP
+      targetPort: 9091
+  selector:
+    {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }}
+  type: "{{ .Values.pushgateway.service.type }}"
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/pushgateway-serviceaccount.yaml b/helm/infrastructure/subcharts/prometheus/templates/pushgateway-serviceaccount.yaml
new file mode 100644 (file)
index 0000000..8c0b876
--- /dev/null
@@ -0,0 +1,11 @@
+{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "prometheus.pushgateway.labels" . | nindent 4 }}
+  name: {{ template "prometheus.serviceAccountName.pushgateway" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-clusterrole.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-clusterrole.yaml
new file mode 100644 (file)
index 0000000..c0c0585
--- /dev/null
@@ -0,0 +1,47 @@
+{{- if and .Values.server.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}
+rules:
+{{- if .Values.podSecurityPolicy.enabled }}
+  - apiGroups:
+    - extensions
+    resources:
+    - podsecuritypolicies
+    verbs:
+    - use
+    resourceNames:
+    - {{ template "prometheus.server.fullname" . }}
+{{- end }}
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - services
+      - endpoints
+      - pods
+      - ingresses
+      - configmaps
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - "/metrics"
+    verbs:
+      - get
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-clusterrolebinding.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-clusterrolebinding.yaml
new file mode 100644 (file)
index 0000000..5beebfc
--- /dev/null
@@ -0,0 +1,16 @@
+{{- if and .Values.server.enabled .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "prometheus.serviceAccountName.server" . }}
+{{ include "prometheus.namespace" . | indent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "prometheus.server.fullname" . }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-configmap.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-configmap.yaml
new file mode 100644 (file)
index 0000000..0838bb3
--- /dev/null
@@ -0,0 +1,82 @@
+{{- if .Values.server.enabled -}}
+{{- if (empty .Values.server.configMapOverrideName) -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+data:
+{{- $root := . -}}
+{{- range $key, $value := .Values.serverFiles }}
+  {{ $key }}: |
+{{- if eq $key "prometheus.yml" }}
+    global:
+{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }}
+{{- if $root.Values.server.remoteWrite }}
+    remote_write:
+{{ $root.Values.server.remoteWrite | toYaml | indent 4 }}
+{{- end }}
+{{- if $root.Values.server.remoteRead }}
+    remote_read:
+{{ $root.Values.server.remoteRead | toYaml | indent 4 }}
+{{- end }}
+{{- end }}
+{{- if eq $key "alerts" }}
+{{- if and (not (empty $value)) (empty $value.groups) }}
+    groups:
+{{- range $ruleKey, $ruleValue := $value }}
+    - name: {{ $ruleKey -}}.rules
+      rules:
+{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }}
+{{- end }}
+{{- else }}
+{{ toYaml $value | indent 4 }}
+{{- end }}
+{{- else }}
+{{ toYaml $value | default "{}" | indent 4 }}
+{{- end }}
+{{- if eq $key "prometheus.yml" -}}
+{{- if $root.Values.extraScrapeConfigs }}
+{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }}
+{{- end -}}
+{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }}
+    alerting:
+{{- if $root.Values.alertRelabelConfigs }}
+{{ $root.Values.alertRelabelConfigs | toYaml  | trimSuffix "\n" | indent 6 }}
+{{- end }}
+      alertmanagers:
+{{- if $root.Values.server.alertmanagers }}
+{{ toYaml $root.Values.server.alertmanagers | indent 8 }}
+{{- else }}
+      - kubernetes_sd_configs:
+          - role: pod
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+        {{- if $root.Values.alertmanager.prefixURL }}
+        path_prefix: {{ $root.Values.alertmanager.prefixURL }}
+        {{- end }}
+        relabel_configs:
+        - source_labels: [__meta_kubernetes_namespace]
+          regex: {{ $root.Release.Namespace }}
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_label_app]
+          regex: {{ template "prometheus.name" $root }}
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_label_component]
+          regex: alertmanager
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe]
+          regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }}
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_container_port_number]
+          regex:
+          action: drop
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-deployment.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-deployment.yaml
new file mode 100644 (file)
index 0000000..e91e7b5
--- /dev/null
@@ -0,0 +1,220 @@
+{{- if .Values.server.enabled -}}
+{{- if not .Values.server.statefulSet.enabled -}}
+apiVersion: {{ template "prometheus.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+{{- if .Values.server.deploymentAnnotations }}
+  annotations:
+{{ toYaml .Values.server.deploymentAnnotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "prometheus.server.matchLabels" . | nindent 6 }}
+  replicas: {{ .Values.server.replicaCount }}
+  {{- if .Values.server.strategy }}
+  strategy:
+{{ toYaml .Values.server.strategy | trim | indent 4 }}
+    {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }}
+{{- end }}
+  template:
+    metadata:
+    {{- if .Values.server.podAnnotations }}
+      annotations:
+{{ toYaml .Values.server.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.server.labels" . | nindent 8 }}
+        {{- if .Values.server.podLabels}}
+        {{ toYaml .Values.server.podLabels | nindent 8 }}
+        {{- end}}
+    spec:
+{{- if .Values.server.priorityClassName }}
+      priorityClassName: "{{ .Values.server.priorityClassName }}"
+{{- end }}
+{{- if .Values.server.schedulerName }}
+      schedulerName: "{{ .Values.server.schedulerName }}"
+{{- end }}
+      serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }}
+      {{- if .Values.server.extraInitContainers }}
+      initContainers:
+{{ toYaml .Values.server.extraInitContainers | indent 8 }}
+      {{- end }}
+      containers:
+        {{- if .Values.configmapReload.prometheus.enabled }}
+        - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }}
+          image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}"
+          imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload
+          {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          {{- range .Values.configmapReload.prometheus.extraVolumeDirs }}
+            - --volume-dir={{ . }}
+          {{- end }}
+          resources:
+{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+          {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+            - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+        {{- end }}
+
+        - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}
+          image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}"
+          imagePullPolicy: "{{ .Values.server.image.pullPolicy }}"
+          {{- if .Values.server.env }}
+          env:
+{{ toYaml .Values.server.env | indent 12}}
+          {{- end }}
+          args:
+          {{- if .Values.server.retention }}
+            - --storage.tsdb.retention.time={{ .Values.server.retention }}
+          {{- end }}
+            - --config.file={{ .Values.server.configPath }}
+            - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }}
+            - --web.console.libraries=/etc/prometheus/console_libraries
+            - --web.console.templates=/etc/prometheus/consoles
+          {{- range .Values.server.extraFlags }}
+            - --{{ . }}
+          {{- end }}
+          {{- if .Values.server.baseURL }}
+            - --web.external-url={{ .Values.server.baseURL }}
+          {{- end }}
+
+          {{- range $key, $value := .Values.server.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          ports:
+            - containerPort: 9090
+          readinessProbe:
+            httpGet:
+              path: {{ .Values.server.prefixURL }}/-/ready
+              port: 9090
+            initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }}
+            timeoutSeconds: {{ .Values.server.readinessProbeTimeout }}
+            failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }}
+            successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }}
+          livenessProbe:
+            httpGet:
+              path: {{ .Values.server.prefixURL }}/-/healthy
+              port: 9090
+            initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }}
+            timeoutSeconds: {{ .Values.server.livenessProbeTimeout }}
+            failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }}
+            successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }}
+          resources:
+{{ toYaml .Values.server.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: {{ .Values.server.persistentVolume.mountPath }}
+              subPath: "{{ .Values.server.persistentVolume.subPath }}"
+          {{- range .Values.server.extraHostPathMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- range .Values.server.extraConfigmapMounts }}
+            - name: {{ $.Values.server.name }}-{{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- range .Values.server.extraSecretMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- if .Values.server.extraVolumeMounts }}
+            {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }}
+          {{- end }}
+      {{- if .Values.server.sidecarContainers }}
+      {{- toYaml .Values.server.sidecarContainers | nindent 8 }}
+      {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+       {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.server.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.server.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.hostAliases }}
+      hostAliases:
+{{ toYaml .Values.server.hostAliases | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.securityContext }}
+      securityContext:
+{{ toYaml .Values.server.securityContext | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.tolerations }}
+      tolerations:
+{{ toYaml .Values.server.tolerations | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.affinity }}
+      affinity:
+{{ toYaml .Values.server.affinity | indent 8 }}
+    {{- end }}
+      terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
+        - name: storage-volume
+        {{- if .Values.server.persistentVolume.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
+        {{- else }}
+          emptyDir:
+          {{- if .Values.server.emptyDir.sizeLimit }}
+            sizeLimit: {{ .Values.server.emptyDir.sizeLimit }}
+          {{- else }}
+            {}
+          {{- end -}}
+        {{- end -}}
+{{- if .Values.server.extraVolumes }}
+{{ toYaml .Values.server.extraVolumes | indent 8}}
+{{- end }}
+      {{- range .Values.server.extraHostPathMounts }}
+        - name: {{ .name }}
+          hostPath:
+            path: {{ .hostPath }}
+      {{- end }}
+      {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+        - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+      {{- range .Values.server.extraConfigmapMounts }}
+        - name: {{ $.Values.server.name }}-{{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+      {{- range .Values.server.extraSecretMounts }}
+        - name: {{ .name }}
+          secret:
+            secretName: {{ .secretName }}
+      {{- end }}
+      {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+        - name: {{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-ingress.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-ingress.yaml
new file mode 100644 (file)
index 0000000..b3a51fa
--- /dev/null
@@ -0,0 +1,45 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.server.ingress.enabled -}}
+{{- $releaseName := .Release.Name -}}
+{{- $serviceName := include "prometheus.server.fullname" . }}
+{{- $servicePort := .Values.server.service.servicePort -}}
+{{- $extraPaths := .Values.server.ingress.extraPaths -}}
+{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+apiVersion: networking.k8s.io/v1beta1
+{{ else }}
+apiVersion: extensions/v1beta1
+{{ end -}}
+kind: Ingress
+metadata:
+{{- if .Values.server.ingress.annotations }}
+  annotations:
+{{ toYaml .Values.server.ingress.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+{{- range $key, $value := .Values.server.ingress.extraLabels }}
+    {{ $key }}: {{ $value }}
+{{- end }}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  rules:
+  {{- range .Values.server.ingress.hosts }}
+    {{- $url := splitList "/" . }}
+    - host: {{ first $url }}
+      http:
+        paths:
+{{ if $extraPaths }}
+{{ toYaml $extraPaths | indent 10 }}
+{{- end }}
+          - path: /{{ rest $url | join "/" }}
+            backend:
+              serviceName: {{ $serviceName }}
+              servicePort: {{ $servicePort }}
+  {{- end -}}
+{{- if .Values.server.ingress.tls }}
+  tls:
+{{ toYaml .Values.server.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-networkpolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-networkpolicy.yaml
new file mode 100644 (file)
index 0000000..c8870e9
--- /dev/null
@@ -0,0 +1,18 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.networkPolicy.enabled }}
+apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
+kind: NetworkPolicy
+metadata:
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "prometheus.server.matchLabels" . | nindent 6 }}
+  ingress:
+    - ports:
+      - port: 9090
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-pdb.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-pdb.yaml
new file mode 100644 (file)
index 0000000..364cb5b
--- /dev/null
@@ -0,0 +1,14 @@
+{{- if .Values.server.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+spec:
+  maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }}
+  selector:
+    matchLabels:
+      {{- include "prometheus.server.labels" . | nindent 6 }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-podsecuritypolicy.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-podsecuritypolicy.yaml
new file mode 100644 (file)
index 0000000..a0e15a3
--- /dev/null
@@ -0,0 +1,53 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "prometheus.server.fullname" . }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  annotations:
+{{- if .Values.server.podSecurityPolicy.annotations }}
+{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  allowedCapabilities:
+    - 'CHOWN'
+  volumes:
+    - 'configMap'
+    - 'persistentVolumeClaim'
+    - 'emptyDir'
+    - 'secret'
+    - 'hostPath'
+  allowedHostPaths:
+    - pathPrefix: /etc
+      readOnly: true
+    - pathPrefix: {{ .Values.server.persistentVolume.mountPath }}
+  {{- range .Values.server.extraHostPathMounts }}
+    - pathPrefix: {{ .hostPath }}
+      readOnly: {{ .readOnly }}
+  {{- end }}
+  hostNetwork: false
+  hostPID: false
+  hostIPC: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-pvc.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-pvc.yaml
new file mode 100644 (file)
index 0000000..fc40c32
--- /dev/null
@@ -0,0 +1,35 @@
+{{- if .Values.server.enabled -}}
+{{- if not .Values.server.statefulSet.enabled -}}
+{{- if .Values.server.persistentVolume.enabled -}}
+{{- if not .Values.server.persistentVolume.existingClaim -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  {{- if .Values.server.persistentVolume.annotations }}
+  annotations:
+{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }}
+  {{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  accessModes:
+{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }}
+{{- if .Values.server.persistentVolume.storageClass }}
+{{- if (eq "-" .Values.server.persistentVolume.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.server.persistentVolume.storageClass }}"
+{{- end }}
+{{- end }}
+{{- if .Values.server.persistentVolume.volumeBindingMode }}
+  volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}"
+{{- end }}
+  resources:
+    requests:
+      storage: "{{ .Values.server.persistentVolume.size }}"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-service-headless.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-service-headless.yaml
new file mode 100644 (file)
index 0000000..001ec32
--- /dev/null
@@ -0,0 +1,27 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.server.statefulSet.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.server.statefulSet.headless.annotations }}
+  annotations:
+{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+{{- if .Values.server.statefulSet.headless.labels }}
+{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }}
+{{- end }}
+  name: {{ template "prometheus.server.fullname" . }}-headless
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  clusterIP: None
+  ports:
+    - name: http
+      port: {{ .Values.server.statefulSet.headless.servicePort }}
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    {{- include "prometheus.server.matchLabels" . | nindent 4 }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-service.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-service.yaml
new file mode 100644 (file)
index 0000000..68f9889
--- /dev/null
@@ -0,0 +1,60 @@
+{{- if .Values.server.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+{{- if .Values.server.service.annotations }}
+  annotations:
+{{ toYaml .Values.server.service.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+{{- if .Values.server.service.labels }}
+{{ toYaml .Values.server.service.labels | indent 4 }}
+{{- end }}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+{{- if .Values.server.service.clusterIP }}
+  clusterIP: {{ .Values.server.service.clusterIP }}
+{{- end }}
+{{- if .Values.server.service.externalIPs }}
+  externalIPs:
+{{ toYaml .Values.server.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.server.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.server.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.server.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }}
+    - {{ $cidr }}
+  {{- end }}
+{{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.server.service.servicePort }}
+      protocol: TCP
+      targetPort: 9090
+    {{- if .Values.server.service.nodePort }}
+      nodePort: {{ .Values.server.service.nodePort }}
+    {{- end }}
+    {{- if .Values.server.service.gRPC.enabled }}
+    - name: grpc
+      port: {{ .Values.server.service.gRPC.servicePort }}
+      protocol: TCP
+      targetPort: 10901
+    {{- if .Values.server.service.gRPC.nodePort }}
+      nodePort: {{ .Values.server.service.gRPC.nodePort }}
+    {{- end }}
+    {{- end }}
+  selector:
+  {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }}
+    statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }}
+  {{- else -}}
+    {{- include "prometheus.server.matchLabels" . | nindent 4 }}
+{{- if .Values.server.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.server.service.sessionAffinity }}
+{{- end }}
+  {{- end }}
+  type: "{{ .Values.server.service.type }}"
+{{- end -}}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-serviceaccount.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-serviceaccount.yaml
new file mode 100644 (file)
index 0000000..9c0502a
--- /dev/null
@@ -0,0 +1,13 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.serviceAccounts.server.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.serviceAccountName.server" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+  annotations:
+{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-statefulset.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-statefulset.yaml
new file mode 100644 (file)
index 0000000..37d9e7d
--- /dev/null
@@ -0,0 +1,228 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.server.statefulSet.enabled -}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+{{- if .Values.server.statefulSet.annotations }}
+  annotations:
+{{ toYaml .Values.server.statefulSet.annotations | indent 4 }}
+{{- end }}
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+    {{- if .Values.server.statefulSet.labels}}
+    {{ toYaml .Values.server.statefulSet.labels | nindent 4 }}
+    {{- end}}
+  name: {{ template "prometheus.server.fullname" . }}
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  serviceName: {{ template "prometheus.server.fullname" . }}-headless
+  selector:
+    matchLabels:
+      {{- include "prometheus.server.matchLabels" . | nindent 6 }}
+  replicas: {{ .Values.server.replicaCount }}
+  podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }}
+  template:
+    metadata:
+    {{- if .Values.server.podAnnotations }}
+      annotations:
+{{ toYaml .Values.server.podAnnotations | indent 8 }}
+    {{- end }}
+      labels:
+        {{- include "prometheus.server.labels" . | nindent 8 }}
+        {{- if .Values.server.statefulSet.labels}}
+        {{ toYaml .Values.server.statefulSet.labels | nindent 8 }}
+        {{- end}}
+    spec:
+{{- if .Values.server.priorityClassName }}
+      priorityClassName: "{{ .Values.server.priorityClassName }}"
+{{- end }}
+{{- if .Values.server.schedulerName }}
+      schedulerName: "{{ .Values.server.schedulerName }}"
+{{- end }}
+      {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "<nil>") }}
+      enableServiceLinks: true
+      {{- else }}
+      enableServiceLinks: false
+      {{- end }}
+      serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }}
+      containers:
+        {{- if .Values.configmapReload.prometheus.enabled }}
+        - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }}
+          image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}"
+          imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload
+          {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          {{- range .Values.configmapReload.prometheus.extraVolumeDirs }}
+            - --volume-dir={{ . }}
+          {{- end }}
+          resources:
+{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+          {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+            - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+        {{- end }}
+        - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}
+          image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}"
+          imagePullPolicy: "{{ .Values.server.image.pullPolicy }}"
+          {{- if .Values.server.env }}
+          env:
+{{ toYaml .Values.server.env | indent 12}}
+          {{- end }}
+          args:
+          {{- if .Values.server.retention }}
+            - --storage.tsdb.retention.time={{ .Values.server.retention }}
+          {{- end }}
+            - --config.file={{ .Values.server.configPath }}
+            - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }}
+            - --web.console.libraries=/etc/prometheus/console_libraries
+            - --web.console.templates=/etc/prometheus/consoles
+          {{- range .Values.server.extraFlags }}
+            - --{{ . }}
+          {{- end }}
+          {{- range $key, $value := .Values.server.extraArgs }}
+            - --{{ $key }}={{ $value }}
+          {{- end }}
+          {{- if .Values.server.baseURL }}
+            - --web.external-url={{ .Values.server.baseURL }}
+          {{- end }}
+          ports:
+            - containerPort: 9090
+          readinessProbe:
+            httpGet:
+              path: {{ .Values.server.prefixURL }}/-/ready
+              port: 9090
+            initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }}
+            timeoutSeconds: {{ .Values.server.readinessProbeTimeout }}
+          livenessProbe:
+            httpGet:
+              path: {{ .Values.server.prefixURL }}/-/healthy
+              port: 9090
+            initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }}
+            timeoutSeconds: {{ .Values.server.livenessProbeTimeout }}
+          resources:
+{{ toYaml .Values.server.resources | indent 12 }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: {{ .Values.server.persistentVolume.mountPath }}
+              subPath: "{{ .Values.server.persistentVolume.subPath }}"
+          {{- range .Values.server.extraHostPathMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- range .Values.server.extraConfigmapMounts }}
+            - name: {{ $.Values.server.name }}-{{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- range .Values.server.extraSecretMounts }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+              subPath: {{ .subPath }}
+              readOnly: {{ .readOnly }}
+          {{- end }}
+          {{- if .Values.server.extraVolumeMounts }}
+          {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }}
+          {{- end }}
+       {{- if .Values.server.sidecarContainers }}
+       {{- toYaml .Values.server.sidecarContainers | nindent 8 }}
+       {{- end }}
+    {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+       {{ toYaml .Values.imagePullSecrets | indent 2 }}
+    {{- end }}
+    {{- if .Values.server.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.server.nodeSelector | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.hostAliases }}
+      hostAliases:
+{{ toYaml .Values.server.hostAliases | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.securityContext }}
+      securityContext:
+{{ toYaml .Values.server.securityContext | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.tolerations }}
+      tolerations:
+{{ toYaml .Values.server.tolerations | indent 8 }}
+    {{- end }}
+    {{- if .Values.server.affinity }}
+      affinity:
+{{ toYaml .Values.server.affinity | indent 8 }}
+    {{- end }}
+      terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }}
+      {{- range .Values.server.extraHostPathMounts }}
+        - name: {{ .name }}
+          hostPath:
+            path: {{ .hostPath }}
+      {{- end }}
+      {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+        - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+      {{- range .Values.server.extraConfigmapMounts }}
+        - name: {{ $.Values.server.name }}-{{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+      {{- range .Values.server.extraSecretMounts }}
+        - name: {{ .name }}
+          secret:
+            secretName: {{ .secretName }}
+      {{- end }}
+      {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }}
+        - name: {{ .name }}
+          configMap:
+            name: {{ .configMap }}
+      {{- end }}
+{{- if .Values.server.extraVolumes }}
+{{ toYaml .Values.server.extraVolumes | indent 8}}
+{{- end }}
+{{- if .Values.server.persistentVolume.enabled }}
+  volumeClaimTemplates:
+    - metadata:
+        name: storage-volume
+        {{- if .Values.server.persistentVolume.annotations }}
+        annotations:
+{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }}
+        {{- end }}
+      spec:
+        accessModes:
+{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }}
+        resources:
+          requests:
+            storage: "{{ .Values.server.persistentVolume.size }}"
+      {{- if .Values.server.persistentVolume.storageClass }}
+      {{- if (eq "-" .Values.server.persistentVolume.storageClass) }}
+        storageClassName: ""
+      {{- else }}
+        storageClassName: "{{ .Values.server.persistentVolume.storageClass }}"
+      {{- end }}
+      {{- end }}
+{{- else }}
+        - name: storage-volume
+          emptyDir: {}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/infrastructure/subcharts/prometheus/templates/server-vpa.yaml b/helm/infrastructure/subcharts/prometheus/templates/server-vpa.yaml
new file mode 100644 (file)
index 0000000..8f55b9b
--- /dev/null
@@ -0,0 +1,25 @@
+{{- if .Values.server.enabled -}}
+{{- if .Values.server.verticalAutoscaler.enabled -}}
+apiVersion: autoscaling.k8s.io/v1beta2
+kind: VerticalPodAutoscaler
+metadata:
+  labels:
+    {{- include "prometheus.server.labels" . | nindent 4 }}
+  name: {{ template "prometheus.server.fullname" . }}-vpa
+{{ include "prometheus.namespace" . | indent 2 }}
+spec:
+  targetRef:
+{{- if .Values.server.statefulSet.enabled }}
+    apiVersion: "apps/v1"
+    kind: StatefulSet
+{{- else }}
+    apiVersion: "extensions/v1beta1"
+    kind: Deployment
+{{- end }}
+    name: {{ template "prometheus.server.fullname" . }}
+  updatePolicy:
+    updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }}
+  resourcePolicy:
+    containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }}
+{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}}
+{{- end -}} {{/* .Values.server.enabled */}}
diff --git a/helm/infrastructure/subcharts/prometheus/values.yaml b/helm/infrastructure/subcharts/prometheus/values.yaml
new file mode 100644 (file)
index 0000000..6afa967
--- /dev/null
@@ -0,0 +1,1546 @@
+rbac:
+  create: true
+
+podSecurityPolicy:
+  enabled: false
+
+imagePullSecrets:
+# - name: "image-pull-secret"
+
+## Define serviceAccount names for components. Defaults to component's fully qualified name.
+##
+serviceAccounts:
+  alertmanager:
+    create: true
+    name:
+    annotations: {}
+  nodeExporter:
+    create: true
+    name:
+    annotations: {}
+  pushgateway:
+    create: true
+    name:
+    annotations: {}
+  server:
+    create: true
+    name:
+    annotations: {}
+
+alertmanager:
+  ## If false, alertmanager will not be installed
+  ##
+  enabled: false
+
+  ## alertmanager container name
+  ##
+  name: alertmanager
+
+  ## alertmanager container image
+  ##
+  image:
+    repository: prom/alertmanager
+    tag: v0.20.0
+    pullPolicy: IfNotPresent
+
+  ## alertmanager priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional alertmanager container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ## The URL prefix at which the container can be accessed. Useful when the '-web.external-url' includes a slug,
+  ## so that the various internal URLs are still accessible as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL which can access alertmanager
+  baseURL: "http://localhost:9093"
+
+  ## Additional alertmanager container environment variable
+  ## For instance to add a http_proxy
+  ##
+  extraEnv: {}
+
+  ## Additional alertmanager Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: alertmanager-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config
+  ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configFromSecret: ""
+
+  ## The configuration file name to be loaded to alertmanager
+  ## Must match the key within configuration loaded from ConfigMap/Secret
+  ##
+  configFileName: alertmanager.yml
+
+  ingress:
+    ## If true, alertmanager Ingress will be created
+    ##
+    enabled: false
+
+    ## alertmanager Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## alertmanager Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## alertmanager Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - alertmanager.domain.com
+    #   - domain.com/alertmanager
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## alertmanager Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - alertmanager.domain.com
+
+  ## Alertmanager Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## Node tolerations for alertmanager scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for alertmanager pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, alertmanager will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: true
+
+    ## alertmanager data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## alertmanager data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## alertmanager data Persistent Volume existing claim name
+    ## Requires alertmanager.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## alertmanager data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## alertmanager data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## alertmanager data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## alertmanager data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of alertmanager data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  ## Annotations to be added to alertmanager pods
+  ##
+  podAnnotations: {}
+    ## Tell prometheus to use a specific set of alertmanager pods
+    ## instead of all alertmanager pods found in the same namespace
+    ## Useful if you deploy multiple releases within the same namespace
+    ##
+    ## prometheus.io/probe: alertmanager-teamA
+
+  ## Labels to be added to Prometheus AlertManager pods
+  ##
+  podLabels: {}
+
+  ## Specify if a Pod Security Policy for alertmanager must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows scaling replicas to more than 1 pod
+    ##
+    enabled: false
+
+    podManagementPolicy: OrderedReady
+
+    ## Alertmanager headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+
+      ## Enable peer mesh service endpoints to enable the HA Alertmanager
+      ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+      # enableMeshPeer : true
+
+      servicePort: 80
+
+  ## alertmanager resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to alertmanager pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## Enable peer mesh service endpoints to enable the HA Alertmanager
+    ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
+    # enableMeshPeer : true
+
+    ## List of IP addresses at which the alertmanager service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    # nodePort: 30000
+    sessionAffinity: None
+    type: ClusterIP
+
+## Monitors ConfigMap changes and POSTs to a URL
+## Ref: https://github.com/jimmidyson/configmap-reload
+##
+configmapReload:
+  prometheus:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: false
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+      repository: jimmidyson/configmap-reload
+      tag: v0.3.0
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+  alertmanager:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+
+    ## configmap-reload container image
+    ##
+    image:
+      repository: jimmidyson/configmap-reload
+      tag: v0.3.0
+      pullPolicy: IfNotPresent
+
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+
+
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+
+
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+
+kubeStateMetrics:
+  ## If false, kube-state-metrics sub-chart will not be installed
+  ##
+  enabled: false
+
+## kube-state-metrics sub-chart configurable values
+## Please see https://github.com/helm/charts/tree/master/stable/kube-state-metrics
+##
+# kube-state-metrics:
+
+nodeExporter:
+  ## If false, node-exporter will not be installed
+  ##
+  enabled: false
+
+  ## If true, node-exporter pods share the host network namespace
+  ##
+  hostNetwork: true
+
+  ## If true, node-exporter pods share the host PID namespace
+  ##
+  hostPID: true
+
+  ## node-exporter container name
+  ##
+  name: node-exporter
+
+  ## node-exporter container image
+  ##
+  image:
+    repository: prom/node-exporter
+    tag: v0.18.1
+    pullPolicy: IfNotPresent
+
+  ## Specify if a Pod Security Policy for node-exporter must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## node-exporter priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Custom Update Strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  ## Additional node-exporter container arguments
+  ##
+  extraArgs: {}
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ## Additional node-exporter hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: textfile-dir
+    #   mountPath: /srv/txt_collector
+    #   hostPath: /var/lib/node-exporter
+    #   readOnly: true
+    #   mountPropagation: HostToContainer
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Node tolerations for node-exporter scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for node-exporter pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to node-exporter pods
+  ##
+  podAnnotations: {}
+
+  ## Labels to be added to node-exporter pods
+  ##
+  pod:
+    labels: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## node-exporter resource limits & requests
+  ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 200m
+    #   memory: 50Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 30Mi
+
+  ## Security context to be added to node-exporter pods
+  ##
+  securityContext: {}
+    # runAsUser: 0
+
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+    labels: {}
+
+    # Exposed as a headless service:
+    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
+    clusterIP: None
+
+    ## List of IP addresses at which the node-exporter service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    hostPort: 9100
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9100
+    type: ClusterIP
+
+server:
+  ## Prometheus server container name
+  ##
+  enabled: true
+  name: server
+  sidecarContainers:
+
+  ## Prometheus server container image
+  ##
+  image:
+    repository: prom/prometheus
+    tag: v2.18.1
+    pullPolicy: IfNotPresent
+
+  ## prometheus server priorityClassName
+  ##
+  priorityClassName: ""
+
+  # EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.
+  enableServiceLinks: true
+
+  ## The URL prefix at which the container can be accessed. Useful when the '-web.external-url' includes a slug,
+  ## so that the various internal URLs are still accessible as they are in the default case.
+  ## (Optional)
+  prefixURL: ""
+
+  ## External URL at which the Prometheus server can be accessed
+  ## Typically the same as the Ingress host name
+  baseURL: ""
+
+  ## Additional server container environment variables
+  ##
+  ## You specify this manually like you would a raw deployment manifest.
+  ## This means you can bind in environment variables from secrets.
+  ##
+  ## e.g. static environment variable:
+  ##  - name: DEMO_GREETING
+  ##    value: "Hello from the environment"
+  ##
+  ## e.g. secret environment variable:
+  ## - name: USERNAME
+  ##   valueFrom:
+  ##     secretKeyRef:
+  ##       name: mysecret
+  ##       key: username
+  env: []
+
+  extraFlags:
+    - web.enable-lifecycle
+    ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
+    ## deleting time series. This is disabled by default.
+    # - web.enable-admin-api
+    ##
+    ## storage.tsdb.no-lockfile flag controls TSDB locking
+    # - storage.tsdb.no-lockfile
+    ##
+    ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
+    # - storage.tsdb.wal-compression
+
+  ## Path to a configuration file on prometheus server container FS
+  configPath: /etc/config/prometheus.yml
+
+  global:
+    ## How frequently to scrape targets by default
+    ##
+    scrape_interval: 1m
+    ## How long until a scrape request times out
+    ##
+    scrape_timeout: 10s
+    ## How frequently to evaluate rules
+    ##
+    evaluation_interval: 1m
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
+  ##
+  remoteWrite: []
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
+  ##
+  remoteRead: []
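+  ## A minimal sketch (hypothetical endpoints); each entry follows the upstream
+  ## remote_write / remote_read configuration schema linked above:
+  # remoteWrite:
+  #   - url: "http://remote-storage.example.com/api/v1/write"
+  # remoteRead:
+  #   - url: "http://remote-storage.example.com/api/v1/read"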
+
+  ## Additional Prometheus server container arguments
+  ##
+  extraArgs: {}
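+    ## Each key/value pair set here is rendered as a "--<key>=<value>" flag on the
+    ## Prometheus server container, e.g. (hypothetical):
+    # log.level: debug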
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ## Additional Prometheus server Volume mounts
+  ##
+  extraVolumeMounts: []
+
+  ## Additional Prometheus server Volumes
+  ##
+  extraVolumes: []
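+  ## A minimal sketch (hypothetical names) pairing an extra volume with its mount;
+  ## both lists are passed through verbatim to the server pod spec:
+  # extraVolumeMounts:
+  #   - name: extra-rules
+  #     mountPath: /etc/prometheus-extra-rules
+  #     readOnly: true
+  # extraVolumes:
+  #   - name: extra-rules
+  #     configMap:
+  #       name: extra-rules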
+
+  ## Additional Prometheus server hostPath mounts
+  ##
+  extraHostPathMounts: []
+    # - name: certs-dir
+    #   mountPath: /etc/kubernetes/certs
+    #   subPath: ""
+    #   hostPath: /etc/kubernetes/certs
+    #   readOnly: true
+
+  extraConfigmapMounts: []
+    # - name: certs-configmap
+    #   mountPath: /prometheus
+    #   subPath: ""
+    #   configMap: certs-configmap
+    #   readOnly: true
+
+  ## Additional Prometheus server Secret mounts
+  # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+  extraSecretMounts: []
+    # - name: secret-files
+    #   mountPath: /etc/secrets
+    #   subPath: ""
+    #   secretName: prom-secret-files
+    #   readOnly: true
+
+  ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}}
+  ## Defining configMapOverrideName will cause templates/server-configmap.yaml
+  ## to NOT generate a ConfigMap resource
+  ##
+  configMapOverrideName: ""
+
+  ingress:
+    ## If true, Prometheus server Ingress will be created
+    ##
+    enabled: false
+
+    ## Prometheus server Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## Prometheus server Ingress additional labels
+    ##
+    extraLabels: {}
+
+    ## Prometheus server Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - prometheus.domain.com
+    #   - domain.com/prometheus
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## Prometheus server Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-server-tls
+    #     hosts:
+    #       - prometheus.domain.com
+
+  ## Server Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  ## hostAliases allows adding entries to /etc/hosts inside the containers
+  hostAliases: []
+  #   - ip: "127.0.0.1"
+  #     hostnames:
+  #       - "example.com"
+
+  ## Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for Prometheus server pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Pod affinity
+  ##
+  affinity: {}
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  persistentVolume:
+    ## If true, Prometheus server will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: false
+
+    ## Prometheus server data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## Prometheus server data Persistent Volume annotations
+    ##
+    annotations: {}
+
+    ## Prometheus server data Persistent Volume existing claim name
+    ## Requires server.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## Prometheus server data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## Prometheus server data Persistent Volume size
+    ##
+    size: 8Gi
+
+    ## Prometheus server data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## Prometheus server data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of Prometheus server data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+  emptyDir:
+    sizeLimit: ""
+
+  ## Annotations to be added to Prometheus server pods
+  ##
+  podAnnotations: {}
+    # iam.amazonaws.com/role: prometheus
+
+  ## Labels to be added to Prometheus server pods
+  ##
+  podLabels: {}
+
+  ## Prometheus AlertManager configuration
+  ##
+  alertmanagers: []
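+    ## Entries set here are inserted verbatim under the "alerting.alertmanagers" section
+    ## of prometheus.yml; a minimal static example (hypothetical target):
+    # - static_configs:
+    #     - targets:
+    #         - "alertmanager.example.svc:9093"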
+
+  ## Specify if a Pod Security Policy for the Prometheus server must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify pod annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
+  ##
+  replicaCount: 1
+
+  statefulSet:
+    ## If true, use a statefulset instead of a deployment for pod management.
+    ## This allows scaling replicas to more than 1 pod
+    ##
+    enabled: false
+
+    annotations: {}
+    labels: {}
+    podManagementPolicy: OrderedReady
+
+    ## Prometheus server headless service to use for the statefulset
+    ##
+    headless:
+      annotations: {}
+      labels: {}
+      servicePort: 80
+
+  ## Prometheus server readiness and liveness probe initial delay and timeout
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+  ##
+  readinessProbeInitialDelay: 30
+  readinessProbeTimeout: 30
+  readinessProbeFailureThreshold: 3
+  readinessProbeSuccessThreshold: 1
+  livenessProbeInitialDelay: 30
+  livenessProbeTimeout: 30
+  livenessProbeFailureThreshold: 3
+  livenessProbeSuccessThreshold: 1
+
+  ## Prometheus server resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 500m
+    #   memory: 512Mi
+    # requests:
+    #   cpu: 500m
+    #   memory: 512Mi
+
+  ## Vertical Pod Autoscaler config
+  ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
+  verticalAutoscaler:
+    ## If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs)
+    enabled: false
+    # updateMode: "Auto"
+    # containerPolicies:
+    # - containerName: 'prometheus-server'
+
+  ## Security context to be added to server pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+    runAsGroup: 65534
+    fsGroup: 65534
+
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the Prometheus server service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    sessionAffinity: None
+    type: ClusterIP
+
+    ## Enable gRPC port on service to allow auto discovery with thanos-querier
+    gRPC:
+      enabled: false
+      servicePort: 10901
+      # nodePort: 10901
+
+    ## If using a statefulSet (statefulSet.enabled=true), configure the
+    ## service to connect to a specific replica to have a consistent view
+    ## of the data.
+    statefulsetReplica:
+      enabled: false
+      replica: 0
+
+  ## Prometheus server pod termination grace period
+  ##
+  terminationGracePeriodSeconds: 300
+
+  ## Prometheus data retention period (default if not specified is 15 days)
+  ##
+  retention: "15d"
+
+pushgateway:
+  ## If false, pushgateway will not be installed
+  ##
+  enabled: false
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
+
+  ## pushgateway container name
+  ##
+  name: pushgateway
+
+  ## pushgateway container image
+  ##
+  image:
+    repository: prom/pushgateway
+    tag: v1.0.1
+    pullPolicy: IfNotPresent
+
+  ## pushgateway priorityClassName
+  ##
+  priorityClassName: ""
+
+  ## Additional pushgateway container arguments
+  ##
+  ## for example: persistence.file: /data/pushgateway.data
+  extraArgs: {}
+
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
+  ingress:
+    ## If true, pushgateway Ingress will be created
+    ##
+    enabled: false
+
+    ## pushgateway Ingress annotations
+    ##
+    annotations: {}
+    #   kubernetes.io/ingress.class: nginx
+    #   kubernetes.io/tls-acme: 'true'
+
+    ## pushgateway Ingress hostnames with optional path
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts: []
+    #   - pushgateway.domain.com
+    #   - domain.com/pushgateway
+
+    ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+
+    ## pushgateway Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls: []
+    #   - secretName: prometheus-alerts-tls
+    #     hosts:
+    #       - pushgateway.domain.com
+
+  ## Node tolerations for pushgateway scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  ## Node labels for pushgateway pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Annotations to be added to pushgateway pods
+  ##
+  podAnnotations: {}
+
+  ## Specify whether a Pod Security Policy for pushgateway must be created
+  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  ##
+  podSecurityPolicy:
+    annotations: {}
+      ## Specify Pod Security Policy annotations
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+      ##
+      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+  replicaCount: 1
+
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
+  ## pushgateway resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+    # limits:
+    #   cpu: 10m
+    #   memory: 32Mi
+    # requests:
+    #   cpu: 10m
+    #   memory: 32Mi
+
+  ## Security context to be added to push-gateway pods
+  ##
+  securityContext:
+    runAsUser: 65534
+    runAsNonRoot: true
+
+  service:
+    annotations:
+      prometheus.io/probe: pushgateway
+    labels: {}
+    clusterIP: ""
+
+    ## List of IP addresses at which the pushgateway service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 9091
+    type: ClusterIP
+
+  ## pushgateway Deployment Strategy type
+  # strategy:
+  #   type: Recreate
+
+  persistentVolume:
+    ## If true, pushgateway will create/use a Persistent Volume Claim
+    ## If false, use emptyDir
+    ##
+    enabled: false
+
+    ## pushgateway data Persistent Volume access modes
+    ## Must match those of existing PV or dynamic provisioner
+    ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+    ##
+    accessModes:
+      - ReadWriteOnce
+
+    ## pushgateway data Persistent Volume Claim annotations
+    ##
+    annotations: {}
+
+    ## pushgateway data Persistent Volume existing claim name
+    ## Requires pushgateway.persistentVolume.enabled: true
+    ## If defined, PVC must be created manually before volume will be bound
+    existingClaim: ""
+
+    ## pushgateway data Persistent Volume mount root path
+    ##
+    mountPath: /data
+
+    ## pushgateway data Persistent Volume size
+    ##
+    size: 2Gi
+
+    ## pushgateway data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+
+    ## pushgateway data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of pushgateway data Persistent Volume to mount
+    ## Useful if the volume's root directory is not empty
+    ##
+    subPath: ""
+
+
+## alertmanager ConfigMap entries
+##
+alertmanagerFiles:
+  alertmanager.yml:
+    global: {}
+      # slack_api_url: ''
+
+    receivers:
+      - name: default-receiver
+        # slack_configs:
+        #  - channel: '@you'
+        #    send_resolved: true
+
+    route:
+      group_wait: 10s
+      group_interval: 5m
+      receiver: default-receiver
+      repeat_interval: 3h
+
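The default-receiver defined above has no notification integration configured, so alerts are routed but effectively dropped. Purely as an illustrative sketch (the webhook URL is a placeholder, not part of this change), a minimal working alertmanager.yml override could look like:

    alertmanagerFiles:
      alertmanager.yml:
        receivers:
          - name: default-receiver
            webhook_configs:
              - url: http://alert-sink.example.internal:8080/notify   # placeholder endpoint
                send_resolved: true
        route:
          group_wait: 10s
          group_interval: 5m
          receiver: default-receiver
          repeat_interval: 3h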
+## Prometheus server ConfigMap entries
+##
+serverFiles:
+
+  ## Alerts configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
+  alerting_rules.yml: {}
+  # groups:
+  #   - name: Instances
+  #     rules:
+  #       - alert: InstanceDown
+  #         expr: up == 0
+  #         for: 5m
+  #         labels:
+  #           severity: page
+  #         annotations:
+  #           description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
+  #           summary: 'Instance {{ $labels.instance }} down'
+  ## DEPRECATED DEFAULT VALUE; unless you are explicitly naming your files, please use alerting_rules.yml
+  alerts: {}
+
+  ## Records configuration
+  ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
+  recording_rules.yml: {}
+  ## DEPRECATED DEFAULT VALUE; unless you are explicitly naming your files, please use recording_rules.yml
+  rules: {}
+
+  prometheus.yml:
+    rule_files:
+      - /etc/config/recording_rules.yml
+      - /etc/config/alerting_rules.yml
+    ## The two files below are DEPRECATED and will be removed from this default values file
+      - /etc/config/rules
+      - /etc/config/alerts
+
+    scrape_configs:
+      - job_name: prometheus
+        static_configs:
+          - targets:
+            - localhost:9090
+
+      # A scrape configuration for running Prometheus on a Kubernetes cluster.
+      # This uses separate scrape configs for cluster components (i.e. API server, node)
+      # and services to allow each to use different authentication configs.
+      #
+      # Kubernetes labels will be added as Prometheus labels on metrics via the
+      # `labelmap` relabeling action.
+
+      # Scrape config for API servers.
+      #
+      # Kubernetes exposes API servers as endpoints of the default/kubernetes
+      # service, so this uses the `endpoints` role and relabelling to keep only
+      # the endpoints associated with the default/kubernetes service on the
+      # default named port `https`. This works for single API server deployments as
+      # well as HA API server deployments.
+      - job_name: 'kubernetes-apiservers'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        # Keep only the default/kubernetes service endpoints for the https port. This
+        # will add a target for each API server for which Kubernetes adds an endpoint to
+        # the default/kubernetes service.
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+            action: keep
+            regex: default;kubernetes;https
+
+      - job_name: 'kubernetes-nodes'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics
+
+
+      - job_name: 'kubernetes-nodes-cadvisor'
+
+        # Default to scraping over https. If required, just disable this or change to
+        # `http`.
+        scheme: https
+
+        # This TLS & bearer token file config is used to connect to the actual scrape
+        # endpoints for cluster components. This is separate to discovery auth
+        # configuration because discovery & scraping are two separate concerns in
+        # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+        # the cluster. Otherwise, more config options have to be provided within the
+        # <kubernetes_sd_config>.
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          # If your node certificates are self-signed or use a different CA to the
+          # master CA, then disable certificate verification below. Note that
+          # certificate verification is an integral part of a secure infrastructure
+          # so this should only be disabled in a controlled environment. You can
+          # disable certificate verification by uncommenting the line below.
+          #
+          insecure_skip_verify: true
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+        kubernetes_sd_configs:
+          - role: node
+
+        # This configuration works only on kubelet 1.7.3+, as the scrape endpoints
+        # for cAdvisor have changed. If you are using an older version, change the
+        # replacement to:
+        # replacement: /api/v1/nodes/$1:4194/proxy/metrics
+        # More info: https://github.com/coreos/prometheus-operator/issues/633
+        relabel_configs:
+          - action: labelmap
+            regex: __meta_kubernetes_node_label_(.+)
+          - target_label: __address__
+            replacement: kubernetes.default.svc:443
+          - source_labels: [__meta_kubernetes_node_name]
+            regex: (.+)
+            target_label: __metrics_path__
+            replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+
+      # Scrape config for service endpoints.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints'
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
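The 'kubernetes-service-endpoints' job above therefore scrapes any Service carrying the listed annotations. A hypothetical Service manifest fragment (name, port, and selector are placeholders) that this job would pick up:

    apiVersion: v1
    kind: Service
    metadata:
      name: example-app-metrics            # placeholder name
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: "/metrics"     # only needed if the path is not /metrics
        prometheus.io/port: "8080"         # only needed if metrics are on a different port
    spec:
      selector:
        app: example-app                   # placeholder selector
      ports:
        - name: http-metrics
          port: 8080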
+      # Scrape config for slow service endpoints; same as above, but with a larger
+      # timeout and a larger interval
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
+      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+      # to set this to `https` & most likely set the `tls_config` of the scrape config.
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+      # service then set this appropriately.
+      - job_name: 'kubernetes-service-endpoints-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: endpoints
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+            action: replace
+            target_label: __scheme__
+            regex: (https?)
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+            action: replace
+            target_label: __address__
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            action: replace
+            target_label: kubernetes_name
+          - source_labels: [__meta_kubernetes_pod_node_name]
+            action: replace
+            target_label: kubernetes_node
+
+      - job_name: 'prometheus-pushgateway'
+        honor_labels: true
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: pushgateway
+
+      # Example scrape config for probing services via the Blackbox Exporter.
+      #
+      # The relabeling allows the actual service scrape endpoint to be configured
+      # via the following annotations:
+      #
+      # * `prometheus.io/probe`: Only probe services that have a value of `true`
+      - job_name: 'kubernetes-services'
+
+        metrics_path: /probe
+        params:
+          module: [http_2xx]
+
+        kubernetes_sd_configs:
+          - role: service
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+            action: keep
+            regex: true
+          - source_labels: [__address__]
+            target_label: __param_target
+          - target_label: __address__
+            replacement: blackbox
+          - source_labels: [__param_target]
+            target_label: instance
+          - action: labelmap
+            regex: __meta_kubernetes_service_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_service_name]
+            target_label: kubernetes_name
+
+      # Example scrape config for pods
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods'
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
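Correspondingly, the 'kubernetes-pods' job above scrapes any pod annotated as described in its comments. A hypothetical pod template fragment (the port value is a placeholder):

    spec:
      template:
        metadata:
          annotations:
            prometheus.io/scrape: "true"
            prometheus.io/path: "/metrics"
            prometheus.io/port: "9102"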
+      # Example scrape config for pods which should be scraped slower. A useful example
+      # would be stackdriver-exporter, which queries an API on every scrape of the pod.
+      #
+      # The relabeling allows the actual pod scrape endpoint to be configured via the
+      # following annotations:
+      #
+      # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
+      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+      - job_name: 'kubernetes-pods-slow'
+
+        scrape_interval: 5m
+        scrape_timeout: 30s
+
+        kubernetes_sd_configs:
+          - role: pod
+
+        relabel_configs:
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
+            action: keep
+            regex: true
+          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+            action: replace
+            target_label: __metrics_path__
+            regex: (.+)
+          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+            action: replace
+            regex: ([^:]+)(?::\d+)?;(\d+)
+            replacement: $1:$2
+            target_label: __address__
+          - action: labelmap
+            regex: __meta_kubernetes_pod_label_(.+)
+          - source_labels: [__meta_kubernetes_namespace]
+            action: replace
+            target_label: kubernetes_namespace
+          - source_labels: [__meta_kubernetes_pod_name]
+            action: replace
+            target_label: kubernetes_pod_name
+
+# Adds additional scrape configs to prometheus.yml.
+# This must be a string, so you have to add a | after extraScrapeConfigs:
+# The example below adds a prometheus-blackbox-exporter scrape config.
+extraScrapeConfigs:
+  # - job_name: 'prometheus-blackbox-exporter'
+  #   metrics_path: /probe
+  #   params:
+  #     module: [http_2xx]
+  #   static_configs:
+  #     - targets:
+  #       - https://example.com
+  #   relabel_configs:
+  #     - source_labels: [__address__]
+  #       target_label: __param_target
+  #     - source_labels: [__param_target]
+  #       target_label: instance
+  #     - target_label: __address__
+  #       replacement: prometheus-blackbox-exporter:9115
+
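Because extraScrapeConfigs is rendered into prometheus.yml as a raw string, it has to be supplied as a literal block scalar (the | mentioned above). A minimal hypothetical override, with a placeholder target:

    extraScrapeConfigs: |
      - job_name: 'example-static-target'
        static_configs:
          - targets:
            - example.internal:9100        # placeholder host:port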
+# Adds the option to set alert_relabel_configs, to avoid duplicate alerts in Alertmanager.
+# Useful in H/A Prometheus setups with different external labels but the same alerts.
+alertRelabelConfigs:
+  # alert_relabel_configs:
+  # - source_labels: [dc]
+  #   regex: (.+)\d+
+  #   target_label: dc
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources.
+  ##
+  enabled: false
+
+# Force namespace of namespaced resources
+forceNamespace: null
index bd0c0f5..d1ba5c5 100644 (file)
@@ -76,6 +76,10 @@ certificate-manager:
   enabled: true
 
 
+prometheus:
+  enabled: false
+
+
 danm-network:
   enabled: true
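The hunk above registers the prometheus chart in the infrastructure values but leaves it disabled by default. Assuming the same enable/disable convention used for the other components in this file, a deployment recipe that wants monitoring would simply override:

    prometheus:
      enabled: true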
 
index a8ee386..92c3f8d 100644 (file)
@@ -27,9 +27,12 @@ data:
       "level": 3
     "rmr":
       "protPort" : "tcp:4560"
-      "maxSize": 2072
+      "maxSize": 8192
       "numWorkers": 1
     "rtmgr":
       "hostAddr": {{ include "common.servicename.rtmgr.http" . | quote }}
       "port"    : {{ include "common.serviceport.rtmgr.http" . }}
       "baseUrl" : "/ric/v1"
+  submgrutartg: |
+      newrt|start
+      newrt|end
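The submgrutartg entry added above gives submgr an empty static RMR seed route table: nothing sits between newrt|start and newrt|end, presumably so RMR can initialize before rtmgr distributes the real routes. Purely to illustrate the file format (the message type and endpoint below are hypothetical, not part of this change), a populated table would look like:

    submgrutartg: |
        newrt|start
        # rte|<message-type>|<endpoint:port>   -- hypothetical route entry
        rte|12010|e2term-rmr-endpoint:38000
        newrt|end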
index c566073..49db0ee 100644 (file)
@@ -50,7 +50,8 @@ spec:
         - name: {{ include "common.containername.submgr" . }}
           image: {{ include "common.dockerregistry.url" $imagectx }}/{{ .Values.submgr.image.name }}:{{ .Values.submgr.image.tag }}
           imagePullPolicy: {{ include "common.dockerregistry.pullpolicy" $pullpolicyctx }}
-          command: ["/run_submgr.sh"]
+          command: ["/submgr"]
+          args: ["-f", "/cfg/submgr-config.yaml"]
           envFrom:
             - configMapRef:
                 name: {{ include "common.configmapname.submgr" . }}-env
@@ -77,4 +78,6 @@ spec:
               - key: submgrcfg
                 path: submgr-config.yaml
                 mode: 0644
-
+              - key: submgrutartg
+                path: submgr-uta-rtg.rt
+                mode: 0644
index 6dbd565..4d40840 100644 (file)
@@ -21,5 +21,6 @@ metadata:
 data:
   RMR_RTG_SVC: {{ include "common.serviceport.submgr.rmr.route" . | quote }}
   RMR_SRC_ID: {{ include "common.servicename.submgr.rmr" . }}.{{ include "common.namespace.platform" . }}
-  CFGFILE: "/cfg/submgr-config.yaml"
+  CFG_FILE: "/cfg/submgr-config.yaml"
+  RMR_SEED_RT: "/cfg/submgr-uta-rtg.rt"
   SUBMGR_SEED_SN: "1"