RELEASE_NAME=$RICAUX_RELEASE_NAME
fi
-RICAUX_COMPONENTS="dashboard message-router ves kong-aux"
+RICAUX_COMPONENTS="dashboard message-router ves kong"
echo "Undeploying RIC AUX components [$RICAUX_COMPONENTS]"
echo "Helm Release Name: $RELEASE_NAME"
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
--- /dev/null
+apiVersion: v1
+appVersion: "1.2"
+description: The Cloud-Native Ingress and Service Mesh for APIs and Microservices
+engine: gotpl
+home: https://KongHQ.com/
+icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png
+maintainers:
+- email: shashi@konghq.com
+ name: shashiranjan84
+name: kong
+sources:
+- https://github.com/Kong/kong
+version: 0.12.2
--- /dev/null
+## Kong
+
+[Kong](https://KongHQ.com/) is an open-source API Gateway and Microservices
+Management Layer, delivering high performance and reliability.
+
+## TL;DR;
+
+```bash
+$ helm install stable/kong
+```
+
+## Introduction
+
+This chart bootstraps all the components needed to run Kong on a [Kubernetes](http://kubernetes.io)
+cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.8+ with Beta APIs enabled.
+- PV provisioner support in the underlying infrastructure if persistence
+ is needed for Kong datastore.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/kong
+```
+
+If using Kong Enterprise, several additional steps are necessary before
+installing the chart. At minimum, you must:
+* Create a [license secret](#license).
+* Set `enterprise.enabled: true` in values.yaml.
+* Update values.yaml to use a Kong Enterprise image. If needed, follow the
+instructions in values.yaml to add a registry pull secret.
+
+Reading through [the full list of Enterprise considerations](#kong-enterprise-specific-parameters)
+is recommended.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
+
+## Configuration
+
+### General Configuration Parameters
+
+The following table lists the configurable parameters of the Kong chart
+and their default values.
+
+| Parameter | Description | Default |
+| ------------------------------ | -------------------------------------------------------------------------------- | ------------------- |
+| image.repository | Kong image | `kong` |
+| image.tag | Kong image version | `1.2` |
+| image.pullPolicy | Image pull policy | `IfNotPresent` |
+| image.pullSecrets | Image pull secrets | `null` |
+| replicaCount | Kong instance count | `1` |
+| admin.useTLS | Secure Admin traffic | `true` |
+| admin.servicePort | TCP port on which the Kong admin service is exposed | `8444` |
+| admin.containerPort | TCP port on which Kong app listens for admin traffic | `8444` |
+| admin.nodePort | Node port when service type is `NodePort` | |
+| admin.hostPort | Host port to use for admin traffic | |
+| admin.type | k8s service type, Options: NodePort, ClusterIP, LoadBalancer | `NodePort` |
+| admin.loadBalancerIP | Will reuse an existing ingress static IP for the admin service | `null` |
+| admin.loadBalancerSourceRanges | Limit admin access to CIDRs if set and service type is `LoadBalancer` | `[]` |
+| admin.ingress.enabled | Enable ingress resource creation (works with proxy.type=ClusterIP) | `false` |
+| admin.ingress.tls | Name of secret resource, containing TLS secret | |
+| admin.ingress.hosts | List of ingress hosts. | `[]` |
+| admin.ingress.path | Ingress path. | `/` |
+| admin.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
+| proxy.http.enabled | Enables http on the proxy | true |
+| proxy.http.servicePort | Service port to use for http | 80 |
+| proxy.http.containerPort | Container port to use for http | 8000 |
+| proxy.http.nodePort | Node port to use for http | 32080 |
+| proxy.http.hostPort | Host port to use for http | |
+| proxy.tls.enabled | Enables TLS on the proxy | true |
+| proxy.tls.containerPort | Container port to use for TLS | 8443 |
+| proxy.tls.servicePort | Service port to use for TLS | 8443 |
+| proxy.tls.nodePort | Node port to use for TLS | 32443 |
+| proxy.tls.hostPort | Host port to use for TLS | |
+| proxy.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | `NodePort` |
+| proxy.loadBalancerSourceRanges | Limit proxy access to CIDRs if set and service type is `LoadBalancer` | `[]` |
+| proxy.loadBalancerIP | To reuse an existing ingress static IP for the admin service | |
+| proxy.externalIPs | IPs for which nodes in the cluster will also accept traffic for the proxy | `[]` |
+| proxy.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
+| proxy.ingress.enabled | Enable ingress resource creation (works with proxy.type=ClusterIP) | `false` |
+| proxy.ingress.tls | Name of secret resource, containing TLS secret | |
+| proxy.ingress.hosts | List of ingress hosts. | `[]` |
+| proxy.ingress.path | Ingress path. | `/` |
+| proxy.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
+| env | Additional [Kong configurations](https://getkong.org/docs/latest/configuration/) | |
+| runMigrations | Run Kong migrations job | `true` |
+| readinessProbe | Kong readiness probe | |
+| livenessProbe | Kong liveness probe | |
+| affinity | Node/pod affinities | |
+| nodeSelector | Node labels for pod assignment | `{}` |
+| podAnnotations | Annotations to add to each pod | `{}` |
+| resources | Pod resource requests & limits | `{}` |
+| tolerations | List of node taints to tolerate | `[]` |
+
+### Admin/Proxy listener override
+
+If you specify `env.admin_listen` or `env.proxy_listen`, this chart will use
+the value provided by you as opposed to constructing a listen variable
+from fields like `proxy.http.containerPort` and `proxy.http.enabled`. This allows
+you to be more prescriptive when defining listen directives.
+
+**Note:** Overriding `env.proxy_listen` and `env.admin_listen` will potentially cause
+`admin.containerPort`, `proxy.http.containerPort` and `proxy.tls.containerPort` to become out of sync,
+and therefore must be updated accordingly.
+
+I.e., updating to `env.proxy_listen: 0.0.0.0:4444, 0.0.0.0:4443 ssl` will need
+`proxy.http.containerPort: 4444` and `proxy.tls.containerPort: 4443` to be set in order
+for the service definition to work properly.
+
+### Kong-specific parameters
+
+Kong has a choice of either Postgres or Cassandra as a backend datastore.
+This chart allows you to choose either of them with the `env.database`
+parameter. Postgres is chosen by default.
+
+Additionally, this chart allows you to use your own database or spin up a new
+instance by using the `postgres.enabled` or `cassandra.enabled` parameters.
+Enabling both will create both databases in your cluster, but only one
+will be used by Kong based on the `env.database` parameter.
+Postgres is enabled by default.
+
+| Parameter | Description | Default |
+| ------------------------------ | -------------------------------------------------------------------- | ------------------- |
+| cassandra.enabled | Spin up a new cassandra cluster for Kong | `false` |
+| postgresql.enabled | Spin up a new postgres instance for Kong | `true` |
+| waitImage.repository | Image used to wait for database to become ready | `busybox` |
+| waitImage.tag | Tag for image used to wait for database to become ready | `latest` |
+| env.database | Choose either `postgres` or `cassandra` | `postgres` |
+| env.pg_user | Postgres username | `kong` |
+| env.pg_database | Postgres database name | `kong` |
+| env.pg_password | Postgres database password (required if you are using your own database)| `kong` |
+| env.pg_host | Postgres database host (required if you are using your own database) | `` |
+| env.pg_port | Postgres database port | `5432` |
+| env.cassandra_contact_points | Cassandra contact points (required if you are using your own database) | `` |
+| env.cassandra_port | Cassandra query port | `9042` |
+| env.cassandra_keyspace | Cassandra keyspace | `kong` |
+| env.cassandra_repl_factor | Replication factor for the Kong keyspace | `2` |
+
+
+All `kong.env` parameters can also accept a mapping instead of a value to ensure the parameters can be set through configmaps and secrets.
+
+An example :
+
+```yaml
+kong:
+ env:
+ pg_user: kong
+ pg_password:
+ valueFrom:
+ secretKeyRef:
+ key: kong
+ name: postgres
+```
+
+
+For complete list of Kong configurations please check https://getkong.org/docs/latest/configuration/.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install stable/kong --name my-release \
+ --set=image.tag=1.2,env.database=cassandra,cassandra.enabled=true
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters
+can be provided while installing the chart. For example,
+
+```console
+$ helm install stable/kong --name my-release -f values.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+### Kong Enterprise-specific parameters
+
+Kong Enterprise requires some additional configuration not needed when using
+Kong OSS. Some of the more important configuration is grouped in sections
+under the `.enterprise` key in values.yaml, though most enterprise-specific
+configuration can be placed under the `.env` key.
+
+To use Kong Enterprise, change your image to a Kong Enterprise image and set
+`.enterprise.enabled: true` in values.yaml to render Enterprise sections of the
+templates. Review the sections below for other settings you should consider
+configuring before installing the chart.
+
+#### Service location hints
+
+Kong Enterprise adds two GUIs, Kong Manager and the Kong Developer Portal, that
+must know where other Kong services (namely the admin and files APIs) can be
+accessed in order to function properly. Kong's default behavior for attempting
+to locate these absent configuration is unlikely to work in common Kubernetes
+environments. Because of this, you should set each of `admin_gui_url`,
+`admin_api_uri`, `proxy_url`, `portal_api_url`, `portal_gui_host`, and
+`portal_gui_protocol` under the `.env` key in values.yaml to locations where
+each of their respective services can be accessed to ensure that Kong services
+can locate one another and properly set CORS headers. See the [Property Reference documentation](https://docs.konghq.com/enterprise/0.35-x/property-reference/)
+for more details on these settings.
+
+#### License
+
+All Kong Enterprise deployments require a license. If you do not have a copy
+of yours, please contact Kong Support. Once you have it, you will need to
+store it in a Secret. Save your secret in a file named `license` (no extension)
+and then create and inspect your secret:
+
+```
+$ kubectl create secret generic kong-enterprise-license --from-file=./license
+$ kubectl get secret kong-enterprise-license -o yaml
+apiVersion: v1
+data:
+ license: eyJsaWNlbnNlIjp7InNpZ25hdHVyZSI6IkhFWSBJIFNFRSBZT1UgUEVFS0lORyBJTlNJREUgTVkgQkFTRTY0IEVYQU1QTEUiLCJwYXlsb2FkIjp7ImN1c3RvbWVyIjoiV0VMTCBUT08gQkFEIiwibGljZW5zZV9jcmVhdGlvbl9kYXRlIjoiMjAxOC0wNi0wNSIsInByb2R1Y3Rfc3Vic2NyaXB0aW9uIjoiVEhFUkVTIE5PVEhJTkcgSEVSRSIsImFkbWluX3NlYXRzIjoiNSIsInN1cHBvcnRfcGxhbiI6IkZha2UiLCJsaWNlbnNlX2V4cGlyYXRpb25fZGF0ZSI6IjIwMjAtMjAtMjAiLCJsaWNlbnNlX2tleSI6IlRTT0kgWkhJViJ9LCJ2ZXJzaW9uIjoxfX0K
+kind: Secret
+metadata:
+ creationTimestamp: "2019-05-17T21:45:16Z"
+ name: kong-enterprise-license
+ namespace: default
+ resourceVersion: "48695485"
+ selfLink: /api/v1/namespaces/default/secrets/kong-enterprise-license
+ uid: 0f2e8903-78ed-11e9-b1a6-42010a8a02ec
+type: Opaque
+```
+Set the secret name in values.yaml, in the `.enterprise.license_secret` key.
+
+#### RBAC
+
+Note that you can create a default RBAC superuser when initially setting up an
+environment, by setting the `KONG_PASSWORD` environment variable on the initial
+migration Job's Pod. This will create a `kong_admin` admin whose token and
+basic-auth password match the value of `KONG_PASSWORD`.
+
+Using RBAC within Kubernetes environments requires providing Kubernetes an RBAC
+user for its readiness and liveness checks. We recommend creating a user that
+has permission to read `/status` and nothing else. For example, with RBAC still
+disabled:
+
+```
+$ curl -sX POST http://admin.kong.example/rbac/users --data name=statuschecker --data user_token=REPLACE_WITH_SOME_TOKEN
+{"user_token_ident":"45239","user_token":"$2b$09$cL.xbvRQCzE35A0osl8VTej7u0BgJOIgpTVjxpwZ1U8.jNdMwyQRW","id":"fe8824dc-09a7-4b68-b5e6-541e4b9b4ced","name":"statuschecker","enabled":true,"comment":null,"created_at":1558131229}
+
+$ curl -sX POST http://admin.kong.example/rbac/roles --data name=read-status
+{"comment":null,"created_at":1558131353,"id":"e32507a5-e636-40b2-88c0-090042db7d79","name":"read-status","is_default":false}
+
+$ curl -sX POST http://admin.kong.example/rbac/roles/read-status/endpoints --data endpoint="/status" --data actions=read
+{"endpoint":"\/status","created_at":1558131423,"workspace":"default","actions":["read"],"negative":false,"role":{"id":"e32507a5-e636-40b2-88c0-090042db7d79"}}
+
+$ curl -sX POST http://admin.kong.example/rbac/users/statuschecker/roles --data roles=read-status
+{"roles":[{"created_at":1558131353,"id":"e32507a5-e636-40b2-88c0-090042db7d79","name":"read-status"}],"user":{"user_token_ident":"45239","user_token":"$2b$09$cL.xbvRQCzE35A0osl8VTej7u0BgJOIgpTVjxpwZ1U8.jNdMwyQRW","id":"fe8824dc-09a7-4b68-b5e6-541e4b9b4ced","name":"statuschecker","comment":null,"enabled":true,"created_at":1558131229}}
+```
+Probes will then need to include that user's token, e.g. for the readinessProbe:
+
+```
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTP
+ httpHeaders:
+ - name: Kong-Admin-Token
+ value: REPLACE_WITH_SOME_TOKEN
+ ...
+```
+
+Note that RBAC is **NOT** currently enabled on the admin API container for the
+controller Pod when the ingress controller is enabled. This admin API container
+is not exposed outside the Pod, so only the controller can interact with it. We
+intend to add RBAC to this container in the future after updating the controller
+to add support for storing its RBAC token in a Secret, as currently it would
+need to be stored in plaintext. RBAC is still enforced on the admin API of the
+main deployment when using the ingress controller, as that admin API *is*
+accessible outside the Pod.
+
+#### Sessions
+
+Login sessions for Kong Manager and the Developer Portal make use of [the Kong
+Sessions plugin](https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/).
+Their configuration must be stored in Secrets, as it contains an HMAC key.
+If using either RBAC or the Portal, create a Secret with `admin_gui_session_conf`
+and `portal_session_conf` keys.
+
+```
+$ cat admin_gui_session_conf
+{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
+$ cat portal_session_conf
+{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
+$ kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf
+secret/kong-session-config created
+```
+The exact plugin settings may vary in your environment. The `secret` should
+always be changed for both configurations.
+
+After creating your secret, set its name in values.yaml, in the
+`.enterprise.rbac.session_conf_secret` and
+`.enterprise.portal.session_conf_secret` keys.
+
+#### Email/SMTP
+
+Email is used to send invitations for [Kong Admins](https://docs.konghq.com/enterprise/0.35-x/kong-manager/networking/email/)
+and [Developers](https://docs.konghq.com/enterprise/0.35-x/developer-portal/configuration/smtp/).
+
+Email invitations rely on setting a number of SMTP settings at once. For
+convenience, these are grouped under the `.enterprise.smtp` key in values.yaml.
+Setting `.enterprise.smtp.disabled: true` will set `KONG_SMTP_MOCK=on` and
+allow Admin/Developer invites to proceed without sending email. Note, however,
+that these have limited functionality without sending email.
+
+If your SMTP server requires authentication, you should set the `username` and
+`smtp_password_secret` keys under `.enterprise.smtp.auth`.
+`smtp_password_secret` must be a Secret containing an `smtp_password` key whose
+value is your SMTP password.
+
+### Kong Ingress Controller
+
+Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+created in your Kubernetes cluster.
+It uses CRDs for more fine grained control over routing and
+for Kong specific configuration.
+To deploy the ingress controller together with
+kong run the following command:
+
+```bash
+# without a database
+helm install stable/kong --set ingressController.enabled=true \
+ --set postgresql.enabled=false --set env.database=off
+# with a database
+helm install stable/kong --set ingressController.enabled=true
+```
+
+If you like to use a static IP:
+
+```shell
+helm install stable/kong --set ingressController.enabled=true --set proxy.loadBalancerIP=[Your IP goes there] --set proxy.type=LoadBalancer --name kong --namespace kong
+```
+
+**Note**: Kong Ingress controller doesn't support custom SSL certificates
+on Admin port. We will be removing this limitation in the future.
+
+Kong ingress controller relies on several Custom Resource Definition objects to
+declare the Kong configurations and synchronize the configuration with the
+Kong admin API. Each of these new objects declared in Kubernetes has a
+one-to-one relation with a Kong resource.
+The custom resources are:
+
+- KongConsumer
+- KongCredential
+- KongPlugin
+- KongIngress
+
+You can learn about kong ingress custom resource definitions [here](https://github.com/Kong/kubernetes-ingress-controller/blob/master/docs/custom-resources.md).
+
+| Parameter | Description | Default |
+| --------------- | ----------------------------------------- | ---------------------------------------------------------------------------- |
+| enabled | Deploy the ingress controller, rbac and crd | false |
+| replicaCount | Number of desired ingress controllers | 1 |
+| image.repository | Docker image with the ingress controller | kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller |
+| image.tag | Version of the ingress controller | 0.2.0 |
+| readinessProbe | Kong ingress controllers readiness probe | |
+| livenessProbe | Kong ingress controllers liveness probe | |
+| ingressClass | The ingress-class value for controller | nginx |
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
--- /dev/null
+appVersion: 3.11.3
+description: Apache Cassandra is a free and open-source distributed database management
+ system designed to handle large amounts of data across many commodity servers, providing
+ high availability with no single point of failure.
+engine: gotpl
+home: http://cassandra.apache.org
+icon: https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Cassandra_logo.svg/330px-Cassandra_logo.svg.png
+keywords:
+- cassandra
+- database
+- nosql
+maintainers:
+- email: goonohc@gmail.com
+ name: KongZ
+name: cassandra
+version: 0.10.5
--- /dev/null
+# Cassandra
+A Cassandra Chart for Kubernetes
+
+## Install Chart
+To install the Cassandra Chart into your Kubernetes cluster (This Chart requires persistent volume by default, you may need to create a storage class before install chart. To create storage class, see [Persist data](#persist_data) section)
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" incubator/cassandra
+```
+
+After installation succeeds, you can get the status of the Chart
+
+```bash
+helm status "cassandra"
+```
+
+If you want to delete your Chart, use this command
+```bash
+helm delete --purge "cassandra"
+```
+
+## Persist data
+You need to create a `StorageClass` before you are able to persist data in a persistent volume.
+To create a `StorageClass` on Google Cloud, run the following
+
+```bash
+kubectl create -f sample/create-storage-gce.yaml
+```
+
+And set the following values in `values.yaml`
+
+```yaml
+persistence:
+ enabled: true
+```
+
+If you want to create a `StorageClass` on other platform, please see documentation here [https://kubernetes.io/docs/user-guide/persistent-volumes/](https://kubernetes.io/docs/user-guide/persistent-volumes/)
+
+When running a cluster without persistence, the termination of a pod will first initiate a decommissioning of that pod.
+Depending on the amount of data stored inside the cluster this may take a while. In order to complete a graceful
+termination, pods need to get more time for it. Set the following values in `values.yaml`:
+
+```yaml
+podSettings:
+ terminationGracePeriodSeconds: 1800
+```
+
+## Install Chart with specific cluster size
+By default, this Chart will create a cassandra with 3 nodes. If you want to change the cluster size during installation, you can use `--set config.cluster_size={value}` argument. Or edit `values.yaml`
+
+For example:
+Set cluster size to 5
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" --set config.cluster_size=5 incubator/cassandra/
+```
+
+## Install Chart with specific resource size
+By default, this Chart will create a cassandra with CPU 2 vCPU and 4Gi of memory which is suitable for development environment.
+If you want to use this Chart for production, I would recommend to update the CPU to 4 vCPU and 16Gi. Also increase size of `max_heap_size` and `heap_new_size`.
+To update the settings, edit `values.yaml`
+
+## Install Chart with specific node
+Sometimes you may need to deploy your cassandra to specific nodes to allocate resources. You can use a node selector by setting `nodes.enabled=true` in `values.yaml`
+For example, you have 6 vms in node pools and you want to deploy cassandra to node which labeled as `cloud.google.com/gke-nodepool: pool-db`
+
+Set the following values in `values.yaml`
+
+```yaml
+nodes:
+ enabled: true
+ selector:
+ nodeSelector:
+ cloud.google.com/gke-nodepool: pool-db
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Cassandra chart and their default values.
+
+| Parameter | Description | Default |
+| ----------------------- | --------------------------------------------- | ---------------------------------------------------------- |
+| `image.repo` | `cassandra` image repository | `cassandra` |
+| `image.tag` | `cassandra` image tag | `3.11.3` |
+| `image.pullPolicy` | Image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` |
+| `image.pullSecrets` | Image pull secrets | `nil` |
+| `config.cluster_domain` | The name of the cluster domain. | `cluster.local` |
+| `config.cluster_name` | The name of the cluster. | `cassandra` |
+| `config.cluster_size` | The number of nodes in the cluster. | `3` |
+| `config.seed_size` | The number of seed nodes used to bootstrap new clients joining the cluster. | `2` |
+| `config.seeds` | The comma-separated list of seed nodes. | Automatically generated according to `.Release.Name` and `config.seed_size` |
+| `config.num_tokens` | Initdb Arguments | `256` |
+| `config.dc_name` | Initdb Arguments | `DC1` |
+| `config.rack_name` | Initdb Arguments | `RAC1` |
+| `config.endpoint_snitch` | Initdb Arguments | `SimpleSnitch` |
+| `config.max_heap_size` | Initdb Arguments | `2048M` |
+| `config.heap_new_size` | Initdb Arguments | `512M` |
+| `config.ports.cql` | Initdb Arguments | `9042` |
+| `config.ports.thrift` | Initdb Arguments | `9160` |
+| `config.ports.agent` | The port of the JVM Agent (if any) | `nil` |
+| `config.start_rpc` | Initdb Arguments | `false` |
+| `configOverrides` | Overrides config files in /etc/cassandra dir | `{}` |
+| `commandOverrides` | Overrides default docker command | `[]` |
+| `argsOverrides` | Overrides default docker args | `[]` |
+| `env` | Custom env variables | `{}` |
+| `persistence.enabled` | Use a PVC to persist data | `true` |
+| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
+| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` |
+| `persistence.size` | Size of data volume | `10Gi` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `4Gi`, CPU: `2` |
+| `service.type` | k8s service type exposing ports, e.g. `NodePort`| `ClusterIP` |
+| `podManagementPolicy` | podManagementPolicy of the StatefulSet | `OrderedReady` |
+| `podDisruptionBudget` | Pod distruption budget | `{}` |
+| `podAnnotations` | pod annotations for the StatefulSet | `{}` |
+| `updateStrategy.type` | UpdateStrategy of the StatefulSet | `OnDelete` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `90` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `90` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `rbac.create` | Specifies whether RBAC resources should be created | `true` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to use | |
+| `backup.enabled` | Enable backup on chart installation | `false` |
+| `backup.schedule` | Keyspaces to backup, each with cron time | |
+| `backup.annotations` | Backup pod annotations | iam.amazonaws.com/role: `cain` |
+| `backup.image.repo` | Backup image repository | `nuvo/cain` |
+| `backup.image.tag` | Backup image tag | `0.4.1` |
+| `backup.extraArgs` | Additional arguments for cain | `[]` |
+| `backup.env` | Backup environment variables | AWS_REGION: `us-east-1` |
+| `backup.resources` | Backup CPU/Memory resource requests/limits | Memory: `1Gi`, CPU: `1` |
+| `backup.destination` | Destination to store backup artifacts | `s3://bucket/cassandra` |
+| `exporter.enabled` | Enable Cassandra exporter | `false` |
+| `exporter.image.repo` | Exporter image repository | `criteord/cassandra_exporter` |
+| `exporter.image.tag` | Exporter image tag | `2.0.2` |
+| `exporter.port` | Exporter port | `5556` |
+| `exporter.jvmOpts` | Exporter additional JVM options | |
+| `affinity` | Kubernetes node affinity | `{}` |
+| `tolerations` | Kubernetes node tolerations | `[]` |
+
+
+## Scale cassandra
+When you want to change the cluster size of your cassandra, you can use the helm upgrade command.
+
+```bash
+helm upgrade --set config.cluster_size=5 cassandra incubator/cassandra
+```
+
+## Get cassandra status
+You can get your cassandra cluster status by running the command
+
+```bash
+kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') nodetool status
+```
+
+Output
+```bash
+Datacenter: asia-east1
+======================
+Status=Up/Down
+|/ State=Normal/Leaving/Joining/Moving
+-- Address Load Tokens Owns (effective) Host ID Rack
+UN 10.8.1.11 108.45 KiB 256 66.1% 410cc9da-8993-4dc2-9026-1dd381874c54 a
+UN 10.8.4.12 84.08 KiB 256 68.7% 96e159e1-ef94-406e-a0be-e58fbd32a830 c
+UN 10.8.3.6 103.07 KiB 256 65.2% 1a42b953-8728-4139-b070-b855b8fff326 b
+```
+
+## Benchmark
+You can use [cassandra-stress](https://docs.datastax.com/en/cassandra/3.0/cassandra/tools/toolsCStress.html) tool to run the benchmark on the cluster by the following command
+
+```bash
+kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') cassandra-stress
+```
+
+Example of `cassandra-stress` argument
+ - Run both read and write with a ratio of 9:1
+ - Operate on a total of 1 million keys with uniform distribution
+ - Use QUORUM for read/write
+ - Generate 50 threads
+ - Generate result in graph
+ - Use NetworkTopologyStrategy with replica factor 2
+
+```bash
+cassandra-stress mixed ratio\(write=1,read=9\) n=1000000 cl=QUORUM -pop dist=UNIFORM\(1..1000000\) -mode native cql3 -rate threads=50 -log file=~/mixed_autorate_r9w1_1M.log -graph file=test2.html title=test revision=test2 -schema "replication(strategy=NetworkTopologyStrategy, factor=2)"
+```
--- /dev/null
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: generic
+provisioner: kubernetes.io/gce-pd
+parameters:
+ type: pd-ssd
--- /dev/null
+Cassandra CQL can be accessed via port {{ .Values.config.ports.cql }} on the following DNS name from within your cluster:
+Cassandra Thrift can be accessed via port {{ .Values.config.ports.thrift }} on the following DNS name from within your cluster:
+
+If you want to connect to the remote instance with your local Cassandra CQL CLI, forward the API port to localhost:9042 by running the following:
+- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') 9042:{{ .Values.config.ports.cql }}
+
+If you want to connect to the Cassandra CQL run the following:
+{{- if contains "NodePort" .Values.service.type }}
+- export CQL_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "cassandra.fullname" . }})
+- export CQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+- cqlsh $CQL_HOST $CQL_PORT
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "cassandra.fullname" . }}'
+- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+- echo cqlsh $SERVICE_IP
+{{- else if contains "ClusterIP" .Values.service.type }}
+- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "cassandra.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 9042:{{ .Values.config.ports.cql }}
+ echo cqlsh 127.0.0.1 9042
+{{- end }}
+
+You can also see the cluster status by running the following:
+- kubectl exec -it --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{.items[0].metadata.name}') nodetool status
+
+To tail the logs for the Cassandra pod run the following:
+- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }')
+
+{{- if not .Values.persistence.enabled }}
+
+Note that the cluster is running with node-local storage instead of PersistentVolumes. In order to prevent data loss,
+pods will be decommissioned upon termination. Decommissioning may take some time, so you might also want to adjust the
+pod termination grace period, which is currently set to {{ .Values.podSettings.terminationGracePeriodSeconds }} seconds.
+
+{{- end}}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+Honors .Values.nameOverride when set; truncated to 63 characters to fit
+the Kubernetes DNS label length limit.
+*/}}
+{{- define "cassandra.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+Precedence: .Values.fullnameOverride > release name (when it already
+contains the chart name) > "<release>-<chart>".
+*/}}
+{{- define "cassandra.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+"+" is replaced because label values may not contain it.
+*/}}
+{{- define "cassandra.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use.
+When serviceAccount.create is true, defaults to the chart fullname;
+otherwise falls back to the namespace "default" service account unless
+an explicit serviceAccount.name is given.
+*/}}
+{{- define "cassandra.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "cassandra.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{/*
+One CronJob per entry in .Values.backup.schedule; each job runs cain
+(https://github.com/nuvo/cain) to back up a single keyspace to
+.Values.backup.destination.
+*/}}
+{{- if .Values.backup.enabled }}
+{{- $release := .Release }}
+{{- $values := .Values }}
+{{- $backup := $values.backup }}
+{{- range $index, $schedule := $backup.schedule }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  {{- /* keyspace names may contain '_', which is invalid in resource names */}}
+  name: {{ template "cassandra.fullname" $ }}-backup-{{ $schedule.keyspace | replace "_" "-" }}
+  labels:
+    app: {{ template "cassandra.name" $ }}-cain
+    chart: {{ template "cassandra.chart" $ }}
+    release: "{{ $release.Name }}"
+    heritage: "{{ $release.Service }}"
+spec:
+  schedule: {{ $schedule.cron | quote }}
+  concurrencyPolicy: Forbid
+  startingDeadlineSeconds: 120
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          annotations:
+{{ toYaml $backup.annotations | indent 12 }}
+        spec:
+          restartPolicy: OnFailure
+          serviceAccountName: {{ template "cassandra.serviceAccountName" $ }}
+          containers:
+          - name: cassandra-backup
+            image: "{{ $backup.image.repos }}:{{ $backup.image.tag }}"
+            command: ["cain"]
+            args:
+            - backup
+            - --namespace
+            - {{ $release.Namespace }}
+            - --selector
+            - release={{ $release.Name }},app={{ template "cassandra.name" $ }}
+            - --keyspace
+            - {{ $schedule.keyspace }}
+            - --dst
+            - {{ $backup.destination }}
+          {{- with $backup.extraArgs }}
+{{ toYaml . | indent 12 }}
+          {{- end }}
+          {{- with $backup.env }}
+            env:
+{{ toYaml . | indent 12 }}
+          {{- end }}
+          {{- with $backup.resources }}
+            resources:
+{{ toYaml . | indent 14 }}
+          {{- end }}
+          {{- /* prefer co-locating the backup pod with a Cassandra pod of
+               this release; "preferred" entries must be WeightedPodAffinityTerms */}}
+          affinity:
+            podAffinity:
+              preferredDuringSchedulingIgnoredDuringExecution:
+              - weight: 100
+                podAffinityTerm:
+                  labelSelector:
+                    matchExpressions:
+                    - key: app
+                      operator: In
+                      values:
+                      - {{ template "cassandra.name" $ }}
+                    - key: release
+                      operator: In
+                      values:
+                      - {{ $release.Name }}
+                  topologyKey: "kubernetes.io/hostname"
+          {{- with $values.tolerations }}
+          tolerations:
+{{ toYaml . | indent 10 }}
+          {{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{/*
+ServiceAccount, Role and RoleBinding used by the cain backup CronJobs.
+The Role grants only what cain needs: reading pods and their logs, and
+exec'ing into the Cassandra pods to run the backup. Everything here is
+rendered only when backups are enabled; the ServiceAccount and RBAC
+objects are individually gated by .Values.serviceAccount.create and
+.Values.rbac.create.
+*/}}
+{{- if .Values.backup.enabled }}
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "cassandra.serviceAccountName" . }}
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ template "cassandra.chart" . }}
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+---
+{{- end }}
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ template "cassandra.fullname" . }}-backup
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ template "cassandra.chart" . }}
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+rules:
+- apiGroups: [""]
+  resources: ["pods", "pods/log"]
+  verbs: ["get", "list"]
+- apiGroups: [""]
+  resources: ["pods/exec"]
+  verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ template "cassandra.fullname" . }}-backup
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ template "cassandra.chart" . }}
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ template "cassandra.fullname" . }}-backup
+subjects:
+- kind: ServiceAccount
+  name: {{ template "cassandra.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{/*
+ConfigMap holding user-supplied Cassandra configuration file overrides
+(.Values.configOverrides); each key becomes a file mounted into
+/etc/cassandra by the StatefulSet. Rendered only when overrides exist.
+NOTE(review): named via "cassandra.name" rather than "cassandra.fullname",
+so two releases of this chart in the same namespace would collide —
+confirm whether that is intended.
+*/}}
+{{- if .Values.configOverrides }}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: {{ template "cassandra.name" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+data:
+{{ toYaml .Values.configOverrides | indent 2 }}
+{{- end }}
--- /dev/null
+{{/*
+Optional PodDisruptionBudget limiting voluntary evictions of Cassandra
+pods. Rendered only when .Values.podDisruptionBudget is non-empty; its
+content (e.g. maxUnavailable / minAvailable) is appended verbatim into
+spec alongside the generated selector.
+NOTE(review): policy/v1beta1 was removed in Kubernetes 1.25 — confirm
+the chart's supported version range before bumping to policy/v1.
+*/}}
+{{- if .Values.podDisruptionBudget -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "cassandra.fullname" . }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "cassandra.name" . }}
+      release: {{ .Release.Name }}
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
+{{- end -}}
--- /dev/null
+{{/*
+Headless Service exposing all Cassandra ports. clusterIP is None so the
+StatefulSet's pods get stable per-pod DNS records, which seed discovery
+relies on.
+NOTE(review): service.type is configurable, but a Service with
+clusterIP: None cannot be NodePort/LoadBalancer (the API server rejects
+that combination) — confirm how non-ClusterIP types are expected to work.
+*/}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "cassandra.fullname" . }}
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ template "cassandra.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  clusterIP: None
+  type: {{ .Values.service.type }}
+  ports:
+  # 7000/7001: inter-node (plain/TLS); 7199: JMX; cql/thrift: client ports
+  - name: intra
+    port: 7000
+    targetPort: 7000
+  - name: tls
+    port: 7001
+    targetPort: 7001
+  - name: jmx
+    port: 7199
+    targetPort: 7199
+  - name: cql
+    port: {{ default 9042 .Values.config.ports.cql }}
+    targetPort: {{ default 9042 .Values.config.ports.cql }}
+  - name: thrift
+    port: {{ default 9160 .Values.config.ports.thrift }}
+    targetPort: {{ default 9160 .Values.config.ports.thrift }}
+  {{- if .Values.config.ports.agent }}
+  - name: agent
+    port: {{ .Values.config.ports.agent }}
+    targetPort: {{ .Values.config.ports.agent }}
+  {{- end }}
+  selector:
+    app: {{ template "cassandra.name" . }}
+    release: {{ .Release.Name }}
--- /dev/null
+{{/*
+Cassandra StatefulSet. Stable pod identity (via the headless service)
+is what makes the generated CASSANDRA_SEEDS DNS names work.
+*/}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ template "cassandra.fullname" . }}
+  labels:
+    app: {{ template "cassandra.name" . }}
+    chart: {{ template "cassandra.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ template "cassandra.name" . }}
+      release: {{ .Release.Name }}
+  serviceName: {{ template "cassandra.fullname" . }}
+  replicas: {{ .Values.config.cluster_size }}
+  podManagementPolicy: {{ .Values.podManagementPolicy }}
+  updateStrategy:
+    type: {{ .Values.updateStrategy.type }}
+  template:
+    metadata:
+      labels:
+        app: {{ template "cassandra.name" . }}
+        release: {{ .Release.Name }}
+{{- if .Values.podLabels }}
+{{ toYaml .Values.podLabels | indent 8 }}
+{{- end }}
+{{- if .Values.podAnnotations }}
+      annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+    spec:
+      hostNetwork: {{ .Values.hostNetwork }}
+{{- if .Values.selector }}
+{{ toYaml .Values.selector | indent 6 }}
+{{- end }}
+      {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+        runAsUser: {{ .Values.securityContext.runAsUser }}
+      {{- end }}
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+      containers:
+{{- if .Values.exporter.enabled }}
+      - name: cassandra-exporter
+        image: "{{ .Values.exporter.image.repo }}:{{ .Values.exporter.image.tag }}"
+        env:
+        - name: CASSANDRA_EXPORTER_CONFIG_listenPort
+          value: {{ .Values.exporter.port | quote }}
+        - name: JVM_OPTS
+          value: {{ .Values.exporter.jvmOpts | quote }}
+        ports:
+        - name: metrics
+          containerPort: {{ .Values.exporter.port }}
+          protocol: TCP
+        - name: jmx
+          containerPort: 5555
+        livenessProbe:
+          tcpSocket:
+            port: {{ .Values.exporter.port }}
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: {{ .Values.exporter.port }}
+          initialDelaySeconds: 20
+          timeoutSeconds: 45
+{{- end }}
+      - name: {{ template "cassandra.fullname" . }}
+        image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+{{- if .Values.commandOverrides }}
+        command: {{ .Values.commandOverrides }}
+{{- end }}
+{{- if .Values.argsOverrides }}
+        args: {{ .Values.argsOverrides }}
+{{- end }}
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+        env:
+        {{- $seed_size := default 1 .Values.config.seed_size | int -}}
+        {{- $global := . }}
+        - name: CASSANDRA_SEEDS
+        {{- if .Values.hostNetwork }}
+          value: {{ required "You must fill \".Values.config.seeds\" with list of Cassandra seeds when hostNetwork is set to true" .Values.config.seeds | quote }}
+        {{- else }}
+          {{- /* seeds are the first $seed_size pods' stable DNS names */}}
+          value: "{{- range $i, $e := until $seed_size }}{{ template "cassandra.fullname" $global }}-{{ $i }}.{{ template "cassandra.fullname" $global }}.{{ $global.Release.Namespace }}.svc.{{ $global.Values.config.cluster_domain }}{{- if (lt ( add1 $i ) $seed_size ) }},{{- end }}{{- end }}"
+        {{- end }}
+        - name: MAX_HEAP_SIZE
+          value: {{ default "8192M" .Values.config.max_heap_size | quote }}
+        - name: HEAP_NEWSIZE
+          value: {{ default "200M" .Values.config.heap_new_size | quote }}
+        - name: CASSANDRA_ENDPOINT_SNITCH
+          value: {{ default "SimpleSnitch" .Values.config.endpoint_snitch | quote }}
+        - name: CASSANDRA_CLUSTER_NAME
+          value: {{ default "Cassandra" .Values.config.cluster_name | quote }}
+        - name: CASSANDRA_DC
+          value: {{ default "DC1" .Values.config.dc_name | quote }}
+        - name: CASSANDRA_RACK
+          value: {{ default "RAC1" .Values.config.rack_name | quote }}
+        - name: CASSANDRA_START_RPC
+          value: {{ default "false" .Values.config.start_rpc | quote }}
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        {{- range $key, $value := .Values.env }}
+        - name: {{ $key | quote }}
+          value: {{ $value | quote }}
+        {{- end }}
+        livenessProbe:
+          exec:
+            command: [ "/bin/sh", "-c", "nodetool status" ]
+          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.livenessProbe.successThreshold }}
+          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+        readinessProbe:
+          exec:
+            {{- /* ready only once this node reports itself Up/Normal */}}
+            command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+${POD_IP}\"" ]
+          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+          successThreshold: {{ .Values.readinessProbe.successThreshold }}
+          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+        ports:
+        - name: intra
+          containerPort: 7000
+        - name: tls
+          containerPort: 7001
+        - name: jmx
+          containerPort: 7199
+        - name: cql
+          containerPort: {{ default 9042 .Values.config.ports.cql }}
+        - name: thrift
+          containerPort: {{ default 9160 .Values.config.ports.thrift }}
+        {{- if .Values.config.ports.agent }}
+        - name: agent
+          containerPort: {{ .Values.config.ports.agent }}
+        {{- end }}
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/cassandra
+{{- range $key, $value := .Values.configOverrides }}
+        - name: cassandra-config-{{ $key | replace "." "-" }}
+          mountPath: /etc/cassandra/{{ $key }}
+          subPath: {{ $key }}
+{{- end }}
+      {{- if not .Values.persistence.enabled }}
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/bin/sh", "-c", "exec nodetool decommission"]
+      {{- end }}
+      terminationGracePeriodSeconds: {{ default 30 .Values.podSettings.terminationGracePeriodSeconds }}
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      - name: {{ .Values.image.pullSecrets }}
+      {{- end }}
+{{- if or .Values.configOverrides (not .Values.persistence.enabled) }}
+      volumes:
+{{- end }}
+{{- range $key, $value := .Values.configOverrides }}
+      - configMap:
+          {{- /* was the literal "cassandra"; must match the ConfigMap's
+               rendered name (which honors nameOverride). "$" is the root
+               context — "." inside range is the map value. */}}
+          name: {{ template "cassandra.name" $ }}
+        name: cassandra-config-{{ $key | replace "." "-" }}
+{{- end }}
+{{- if not .Values.persistence.enabled }}
+      - name: data
+        emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+      labels:
+        app: {{ template "cassandra.name" . }}
+        chart: {{ template "cassandra.chart" . }}
+        release: {{ .Release.Name }}
+        heritage: {{ .Release.Service }}
+    spec:
+      accessModes:
+        - {{ .Values.persistence.accessMode | quote }}
+      resources:
+        requests:
+          storage: {{ .Values.persistence.size | quote }}
+      {{- if .Values.persistence.storageClass }}
+      {{- if (eq "-" .Values.persistence.storageClass) }}
+      storageClassName: ""
+      {{- else }}
+      storageClassName: "{{ .Values.persistence.storageClass }}"
+      {{- end }}
+      {{- end }}
+{{- end }}
--- /dev/null
+## Cassandra image version
+## ref: https://hub.docker.com/r/library/cassandra/
+image:
+ repo: cassandra
+ tag: 3.11.3
+ pullPolicy: IfNotPresent
+ ## Specify ImagePullSecrets for Pods
+ ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ # pullSecrets: myregistrykey
+
+## Specify a service type
+## ref: http://kubernetes.io/docs/user-guide/services/
+service:
+ type: ClusterIP
+
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+ ## cassandra data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteOnce
+ size: 10Gi
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Minimum memory for development is 4GB and 2 CPU cores
+## Minimum memory for production is 8GB and 4 CPU cores
+## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html
+resources: {}
+ # requests:
+ # memory: 4Gi
+ # cpu: 2
+ # limits:
+ # memory: 4Gi
+ # cpu: 2
+
+## Change cassandra configuration parameters below:
+## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html
+## Recommended max heap size is 1/2 of system memory
+## Recommended heap new size is 1/4 of max heap size
+## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/operations/opsTuneJVM.html
+config:
+ cluster_domain: cluster.local
+ cluster_name: cassandra
+ cluster_size: 3
+ seed_size: 2
+ num_tokens: 256
+ # If you want Cassandra to use this datacenter and rack name,
+ # you need to set endpoint_snitch to GossipingPropertyFileSnitch.
+ # Otherwise, these values are ignored and datacenter1 and rack1
+ # are used.
+ dc_name: DC1
+ rack_name: RAC1
+ endpoint_snitch: SimpleSnitch
+ max_heap_size: 2048M
+ heap_new_size: 512M
+ start_rpc: false
+ ports:
+ cql: 9042
+ thrift: 9160
+ # If a JVM Agent is in place
+ # agent: 61621
+
+## Cassandra config files overrides
+configOverrides: {}
+
+## Cassandra docker command overrides
+commandOverrides: []
+
+## Cassandra docker args overrides
+argsOverrides: []
+
+## Custom env variables.
+## ref: https://hub.docker.com/_/cassandra/
+env: {}
+
+## Liveness and Readiness probe values.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+livenessProbe:
+ initialDelaySeconds: 90
+ periodSeconds: 30
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+readinessProbe:
+ initialDelaySeconds: 90
+ periodSeconds: 30
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+
+## Configure node selector. Edit code below for adding selector to pods
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+# selector:
+ # nodeSelector:
+ # cloud.google.com/gke-nodepool: pool-db
+
+## Additional pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+
+## Additional pod labels
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+## Additional pod-level settings
+podSettings:
+ # Change this to give pods more time to properly leave the cluster when not using persistent storage.
+ terminationGracePeriodSeconds: 30
+
+## Pod disruption budget
+podDisruptionBudget: {}
+ # maxUnavailable: 1
+ # minAvailable: 2
+
+podManagementPolicy: OrderedReady
+updateStrategy:
+ type: OnDelete
+
+## Pod Security Context
+securityContext:
+ enabled: false
+ fsGroup: 999
+ runAsUser: 999
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Node tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name:
+
+# Use host network for Cassandra pods
+# You must pass seed list into config.seeds property if set to true
+hostNetwork: false
+
+## Backup cronjob configuration
+## Ref: https://github.com/nuvo/cain
+backup:
+ enabled: false
+
+ # Schedule to run jobs. Must be in cron time format
+ # Ref: https://crontab.guru/
+ schedule:
+ - keyspace: keyspace1
+ cron: "0 7 * * *"
+ - keyspace: keyspace2
+ cron: "30 7 * * *"
+
+ annotations:
+ # Example for authorization to AWS S3 using kube2iam
+ # Can also be done using environment variables
+ iam.amazonaws.com/role: cain
+
+ image:
+ repos: nuvo/cain
+ tag: 0.4.1
+
+ # Additional arguments for cain
+ # Ref: https://github.com/nuvo/cain#usage
+ extraArgs: []
+
+ # Add additional environment variables
+ env:
+ # Example environment variable required for AWS credentials chain
+ - name: AWS_REGION
+ value: us-east-1
+
+ resources:
+ requests:
+ memory: 1Gi
+ cpu: 1
+ limits:
+ memory: 1Gi
+ cpu: 1
+
+ # Destination to store the backup artifacts
+ # Supported cloud storage services: AWS S3, Minio S3, Azure Blob Storage
+  # Additional support can be added. Visit this repository for details
+ # Ref: https://github.com/nuvo/skbn
+ destination: s3://bucket/cassandra
+
+## Cassandra exporter configuration
+## ref: https://github.com/criteo/cassandra_exporter
+exporter:
+ enabled: false
+ image:
+ repo: criteord/cassandra_exporter
+ tag: 2.0.2
+ port: 5556
+ jvmOpts: ""
--- /dev/null
+.git
+OWNERS
\ No newline at end of file
--- /dev/null
+appVersion: 10.6.0
+description: Chart for PostgreSQL, an object-relational database management system
+ (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+engine: gotpl
+home: https://www.postgresql.org/
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- email: containers@bitnami.com
+ name: Bitnami
+- email: cedric@desaintmartin.fr
+ name: desaintmartin
+name: postgresql
+sources:
+- https://github.com/bitnami/bitnami-docker-postgresql
+version: 3.9.5
--- /dev/null
+# PostgreSQL
+
+[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+
+## TL;DR;
+
+```console
+$ helm install stable/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.10+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following tables lists the configurable parameters of the PostgreSQL chart and their default values.
+
+| Parameter | Description | Default |
+|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------|
+| `global.imageRegistry` | Global Docker Image registry | `nil` |
+| `image.registry` | PostgreSQL Image registry | `docker.io` |
+| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL Image tag | `{VERSION}` |
+| `image.pullPolicy` | PostgreSQL Image pull policy | `Always` |
+| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `latest` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` |
+| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` |
+| `replication.enabled` | Would you like to enable replication | `false` |
+| `replication.user` | Replication user | `repl_user` |
+| `replication.password` | Replication user password | `repl_password` |
+| `replication.slaveReplicas` | Number of slaves replicas | `1` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `existingSecret` | Name of existing secret to use for PostgreSQL passwords | `nil` |
+| `postgresqlUsername` | PostgreSQL admin user | `postgres` |
+| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ |
+| `postgresqlDatabase` | PostgreSQL database | `nil` |
+| `postgresqlConfiguration` | Runtime Config Parameters | `nil` |
+| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` |
+| `pgHbaConfiguration` | Content of pg\_hba.conf | `nil (do not create pg_hba.conf)` |
+| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`) | `nil` |
+| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files | `nil` |
+| `initdbScripts` | List of initdb scripts | `nil` |
+| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.port` | PostgreSQL port | `5432` |
+| `service.nodePort` | Kubernetes Service nodePort | `nil` |
+| `service.annotations` | Annotations for PostgreSQL service | {} |
+| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
+| `persistence.enabled` | Enable persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` |
+| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` |
+| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` |
+| `persistence.accessMode` | PVC Access Mode for PostgreSQL volume | `ReadWriteOnce` |
+| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` |
+| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` |
+| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` |
+| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` |
+| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` |
+| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` |
+| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` |
+| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `livenessProbe.enabled`                       | Would you like a livenessProbe to be enabled                                                                             | `true`                                                    |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` |
+| `readinessProbe.initialDelaySeconds`          | Delay before readiness probe is initiated                                                                                | 5                                                         |
+| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.clusterIP` | Static clusterIP or None for headless services | `nil` |
+| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{}` |
+| `metrics.service.loadBalancerIP`              | loadBalancerIP if postgresql metrics service type is `LoadBalancer`                                                      | `nil`                                                     |
+| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Image name | `wrouesnel/postgres_exporter` |
+| `metrics.image.tag` | PostgreSQL Image tag | `{VERSION}` |
+| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `extraEnv` | Any extra environment variables you would like to pass on to the pod | `{}` |
+| `updateStrategy` | Update strategy policy | `{type: "onDelete"}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install --name my-release \
+ --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+ stable/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install --name my-release -f values.yaml stable/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports to customize the whole configuration file.
+
+Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server.
+
+Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
+
+### Allow settings to be loaded from files other than the default `postgresql.conf`
+
+If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
+Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
+
+Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
+
+## Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+## Production and horizontal scaling
+
+This repository contains an alternative [values file](values-production.yaml) with the recommended production settings for a PostgreSQL server. Please read the comments in the values-production.yaml file carefully to set up your environment.
+
+To horizontally scale this chart, first download the [values-production.yaml](values-production.yaml) file to your local folder, then:
+
+```console
+$ helm install --name my-release -f ./values-production.yaml stable/postgresql
+$ kubectl scale statefulset my-release-postgresql-slave --replicas=3
+```
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Configuration](#configuration) section to configure the PVC or to disable persistence.
+
+## Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed; it is expected that the metrics are collected from inside the k8s cluster using an approach similar to the one described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Upgrade
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, follow the steps below:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running
+
+ ```console
+$ kubectl get svc
+ ```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install --name my-release stable/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command, you will be prompted for a password. This password is the previous chart password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
--- /dev/null
+Copy your postgresql.conf and/or pg_hba.conf files here to use them as a config map.
--- /dev/null
+If you don't want to provide the whole configuration file and only want to specify certain parameters, you can copy your extended `.conf` files here.
+These files will be injected as config maps, adding to/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
+
+More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).
--- /dev/null
+You can copy your custom `.sh`, `.sql` or `.sql.gz` files here so they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file
--- /dev/null
+{{- if contains .Values.service.type "LoadBalancer" }}
+{{- if not .Values.postgresqlPassword }}
+-------------------------------------------------------------------------------
+ WARNING
+
+ By specifying "serviceType=LoadBalancer" and not specifying "postgresqlPassword"
+ you have most likely exposed the PostgreSQL service externally without any
+ authentication mechanism.
+
+ For security reasons, we strongly suggest that you switch to "ClusterIP" or
+ "NodePort". As an alternative, you can also specify a valid password on the
+ "postgresqlPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+PostgreSQL can be accessed via port 5432 on the following DNS name from within your cluster:
+
+ {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+{{- if .Values.replication.enabled }}
+ {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+{{- end }}
+To get the password for "{{ .Values.postgresqlUsername }}" run:
+
+ export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "postgresql.fullname" . }}{{ end }} -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+
+To connect to your database run the following command:
+
+ kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image bitnami/postgresql --env="PGPASSWORD=$POSTGRESQL_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+ --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }}
+
+{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+Note: Since NetworkPolicy is enabled, only pods with label "{{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster.
+{{- end }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }})
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }}
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host $SERVICE_IP --port {{ .Values.service.port }} -U {{ .Values.postgresqlUsername }}
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} 5432:5432 &
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }}
+
+{{- end }}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "postgresql.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- printf .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.master.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}}
+{{- if .Values.replication.enabled -}}
+{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "postgresql.networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+"extensions/v1beta1"
+{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+"networking.k8s.io/v1"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "postgresql.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name
+*/}}
+{{- define "postgresql.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+ {{- if .Values.global.imageRegistry }}
+ {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+ {{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+ {{- end -}}
+{{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper image name to change the volume permissions
+*/}}
+{{- define "postgresql.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+ {{- if .Values.global.imageRegistry }}
+ {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+ {{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+ {{- end -}}
+{{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Return the proper PostgreSQL metrics image name
+*/}}
+{{- define "metrics.image" -}}
+{{- $registryName := default "docker.io" .Values.metrics.image.registry -}}
+{{- $tag := default "latest" .Values.metrics.image.tag | toString -}}
+{{- printf "%s/%s:%s" $registryName .Values.metrics.image.repository $tag -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "postgresql.secretName" -}}
+{{- if .Values.existingSecret -}}
+{{- printf "%s" .Values.existingSecret -}}
+{{- else -}}
+{{- printf "%s" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration ConfigMap name.
+*/}}
+{{- define "postgresql.configurationCM" -}}
+{{- if .Values.configurationConfigMap -}}
+{{- printf "%s" .Values.configurationConfigMap -}}
+{{- else -}}
+{{- printf "%s-configuration" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extended configuration ConfigMap name.
+*/}}
+{{- define "postgresql.extendedConfigurationCM" -}}
+{{- if .Values.extendedConfConfigMap -}}
+{{- printf "%s" .Values.extendedConfConfigMap -}}
+{{- else -}}
+{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name.
+*/}}
+{{- define "postgresql.initdbScriptsCM" -}}
+{{- if .Values.initdbScriptsConfigMap -}}
+{{- printf "%s" .Values.initdbScriptsConfigMap -}}
+{{- else -}}
+{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-configuration
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+{{- if (.Files.Glob "files/postgresql.conf") }}
+{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }}
+{{- else if .Values.postgresqlConfiguration }}
+ postgresql.conf: |
+{{- range $key, $value := default dict .Values.postgresqlConfiguration }}
+ {{ $key | snakecase }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- if (.Files.Glob "files/pg_hba.conf") }}
+{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }}
+{{- else if .Values.pgHbaConfiguration }}
+ pg_hba.conf: |
+{{ .Values.pgHbaConfiguration | indent 4 }}
+{{- end }}
+{{ end }}
--- /dev/null
+{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-extended-configuration
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+{{- with .Files.Glob "files/conf.d/*.conf" }}
+{{ .AsConfig | indent 2 }}
+{{- end }}
+{{ with .Values.postgresqlExtendedConf }}
+ override.conf: |
+{{- range $key, $value := . }}
+ {{ $key | snakecase }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-init-scripts
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }}
+binaryData:
+{{- range $path, $bytes := . }}
+ {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }}
+{{- end }}
+{{- end }}
+data:
+{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }}
+{{ .AsConfig | indent 2 }}
+{{- end }}
+{{- with .Values.initdbScripts }}
+{{ toYaml . | indent 2 }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-metrics
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ annotations:
+{{ toYaml .Values.metrics.service.annotations | indent 4 }}
+spec:
+ type: {{ .Values.metrics.service.type }}
+ {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: metrics
+ port: 9187
+ targetPort: metrics
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name }}
+ role: master
+{{- end }}
--- /dev/null
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }}
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ podSelector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ ingress:
+ # Allow inbound connections
+ - ports:
+ - port: 5432
+ {{- if not .Values.networkPolicy.allowExternal }}
+ from:
+ - podSelector:
+ matchLabels:
+ {{ template "postgresql.fullname" . }}-client: "true"
+ {{- end }}
+ # Allow prometheus scrapes
+ - ports:
+ - port: 9187
+{{- end }}
--- /dev/null
+{{- if not .Values.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+type: Opaque
+data:
+ {{- if .Values.postgresqlPassword }}
+ postgresql-password: {{ .Values.postgresqlPassword | b64enc | quote }}
+ {{- else }}
+ postgresql-password: {{ randAlphaNum 10 | b64enc | quote }}
+ {{- end }}
+ {{- if .Values.replication.enabled }}
+ {{- if .Values.replication.password }}
+ postgresql-replication-password: {{ .Values.replication.password | b64enc | quote }}
+ {{- else }}
+ postgresql-replication-password: {{ randAlphaNum 10 | b64enc | quote }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
--- /dev/null
+{{- if .Values.replication.enabled }}
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+ name: "{{ template "postgresql.fullname" . }}-slave"
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: {{ template "postgresql.fullname" . }}-headless
+ replicas: {{ .Values.replication.slaveReplicas }}
+ selector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: slave
+ template:
+ metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ role: slave
+ spec:
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup }}
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ {{- if .Values.slave.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.slave.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.slave.affinity }}
+ affinity:
+{{ toYaml .Values.slave.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.slave.tolerations }}
+ tolerations:
+{{ toYaml .Values.slave.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+ initContainers:
+ - name: init-chmod-data
+ image: {{ template "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ command:
+ - sh
+ - -c
+ - |
+ chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /bitnami
+ if [ -d /bitnami/postgresql/data ]; then
+ chmod 0700 /bitnami/postgresql/data;
+ fi
+ securityContext:
+ runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/postgresql
+ {{- end }}
+ containers:
+ - name: {{ template "postgresql.fullname" . }}
+ image: {{ template "postgresql.image" . }}
+ imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ {{- if .Values.image.debug}}
+ - name: BASH_DEBUG
+ value: "1"
+ - name: NAMI_DEBUG
+ value: "1"
+ {{- end }}
+ - name: POSTGRESQL_REPLICATION_MODE
+ value: "slave"
+ - name: POSTGRESQL_REPLICATION_USER
+ value: {{ .Values.replication.user | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+ {{- else }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-replication-password
+ {{- end }}
+ - name: POSTGRESQL_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ - name: POSTGRESQL_MASTER_HOST
+ value: {{ template "postgresql.fullname" . }}
+ - name: POSTGRESQL_MASTER_PORT_NUMBER
+ value: {{ .Values.service.port | quote }}
+ ports:
+ - name: postgresql
+ containerPort: {{ .Values.service.port }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets
+ {{ end }}
+ {{- if .Values.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ {{ end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ mountPath: /bitnami/postgresql/conf/conf.d/
+ {{- end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
+ - name: postgresql-config
+ mountPath: /bitnami/postgresql/conf
+ {{- end }}
+ volumes:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ template "postgresql.secretName" . }}
+ {{ end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
+ - name: postgresql-config
+ configMap:
+ name: {{ template "postgresql.configurationCM" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ template "postgresql.extendedConfigurationCM" . }}
+ {{- end }}
+ {{- if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- end }}
+ updateStrategy:
+ type: {{ .Values.updateStrategy.type }}
+{{- if .Values.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- with .Values.persistence.annotations }}
+ annotations:
+ {{- range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{- end }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+ name: {{ template "postgresql.master.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: {{ template "postgresql.fullname" . }}-headless
+ replicas: 1
+ updateStrategy:
+ type: {{ .Values.updateStrategy.type }}
+ selector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: master
+ template:
+ metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ role: master
+ spec:
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup }}
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+ {{- if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- range .Values.metrics.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ {{- if .Values.master.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.master.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.master.affinity }}
+ affinity:
+{{ toYaml .Values.master.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.master.tolerations }}
+ tolerations:
+{{ toYaml .Values.master.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+ initContainers:
+ - name: init-chmod-data
+ image: {{ template "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ command:
+ - sh
+ - -c
+ - |
+ chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /bitnami
+ if [ -d /bitnami/postgresql/data ]; then
+ chmod 0700 /bitnami/postgresql/data;
+ fi
+ securityContext:
+ runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/postgresql
+ {{- end }}
+ containers:
+ - name: {{ template "postgresql.fullname" . }}
+ image: {{ template "postgresql.image" . }}
+ imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ {{- if .Values.image.debug}}
+ - name: BASH_DEBUG
+ value: "1"
+ - name: NAMI_DEBUG
+ value: "1"
+ {{- end }}
+ {{- if .Values.replication.enabled }}
+ - name: POSTGRESQL_REPLICATION_MODE
+ value: "master"
+ - name: POSTGRESQL_REPLICATION_USER
+ value: {{ .Values.replication.user | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+ {{- else }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-replication-password
+ {{- end }}
+ {{- if not (eq .Values.replication.synchronousCommit "off")}}
+ - name: POSTGRESQL_SYNCHRONOUS_COMMIT_MODE
+ value: {{ .Values.replication.synchronousCommit | quote }}
+ - name: POSTGRESQL_NUM_SYNCHRONOUS_REPLICAS
+ value: {{ .Values.replication.numSynchronousReplicas | quote }}
+ {{- end }}
+ - name: POSTGRESQL_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ value: {{ .Values.postgresqlUsername | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-password"
+ {{- else }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.postgresqlDatabase }}
+ - name: POSTGRESQL_DATABASE
+ value: {{ .Values.postgresqlDatabase | quote }}
+ {{- end }}
+{{- if .Values.extraEnv }}
+{{ toYaml .Values.extraEnv | indent 8 }}
+{{- end }}
+ ports:
+ - name: postgresql
+ containerPort: {{ .Values.service.port }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ mountPath: /bitnami/postgresql/conf/conf.d/
+ {{- end }}
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
+ - name: postgresql-config
+ mountPath: /bitnami/postgresql/conf
+ {{- end }}
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase)" .Values.postgresqlDatabase }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "localhost:%d/%s?sslmode=disable" (int .Values.service.port) $database | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-password"
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-password
+ {{- end }}
+ - name: DATA_SOURCE_USER
+ value: {{ .Values.postgresqlUsername | quote }}
+ {{- if .Values.metrics.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /
+ port: metrics
+ initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.metrics.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /
+ port: metrics
+ initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9187
+ resources:
+{{ toYaml .Values.metrics.resources | indent 10 }}
+{{- end }}
+ volumes:
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
+ - name: postgresql-config
+ configMap:
+ name: {{ template "postgresql.configurationCM" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ template "postgresql.extendedConfigurationCM" . }}
+ {{- end }}
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ template "postgresql.secretName" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ template "postgresql.initdbScriptsCM" . }}
+ {{- end }}
+{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim }}
+{{- else if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- with .Values.persistence.annotations }}
+ annotations:
+ {{- range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{- end }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-headless
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: postgresql
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
--- /dev/null
+{{- if .Values.replication.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-read
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: postgresql
+ port: {{ .Values.service.port }}
+ targetPort: postgresql
+ {{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: slave
+{{- end }}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: postgresql
+ port: {{ .Values.service.port }}
+ targetPort: postgresql
+ {{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: master
--- /dev/null
+## Global Docker image registry
+### Please note that this will override the image registry for all the images, including dependencies, configured to use the global value
+###
+## global:
+## imageRegistry:
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 10.6.0
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+ ## Set to true if you would like to see extra information on logs
+ ## It turns BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ debug: false
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: latest
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+ ## Init container Security Context
+ securityContext:
+ runAsUser: 0
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+ enabled: true
+ fsGroup: 1001
+ runAsUser: 1001
+
+replication:
+ enabled: true
+ user: repl_user
+ password: repl_password
+ slaveReplicas: 2
+ ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+ ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ synchronousCommit: "on"
+ ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+ ## NOTE: It cannot be > slaveReplicas
+ numSynchronousReplicas: 1
+ ## Replication Cluster application name. Useful for defining multiple replication policies
+ applicationName: my_application
+
+## PostgreSQL admin user
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+postgresqlUsername: postgres
+
+## PostgreSQL password
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+##
+# postgresqlPassword:
+
+## Create a database
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+##
+# postgresqlDatabase:
+
+## PostgreSQL password using existing secret
+## existingSecret: secret
+
+## Mount PostgreSQL secret as a file instead of passing environment variable
+# usePasswordFile: false
+
+## PostgreSQL configuration
+## Specify runtime configuration parameters as a dict, using camelCase, e.g.
+## {"sharedBuffers": "500MB"}
+## Alternatively, you can put your postgresql.conf under the files/ directory
+## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+##
+# postgresqlConfiguration:
+
+## PostgreSQL extended configuration
+## As above, but _appended_ to the main configuration
+## Alternatively, you can put your *.conf under the files/conf.d/ directory
+## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+##
+# postgresqlExtendedConf:
+
+## PostgreSQL client authentication configuration
+## Specify content for pg_hba.conf
+## Default: do not create pg_hba.conf
+## Alternatively, you can put your pg_hba.conf under the files/ directory
+# pgHbaConfiguration: |-
+# local all all trust
+# host all all localhost trust
+# host mydatabase mysuser 192.168.0.0/24 md5
+
+## ConfigMap with PostgreSQL configuration
+## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
+# configurationConfigMap:
+
+## ConfigMap with PostgreSQL extended configuration
+# extendedConfConfigMap:
+
+## initdb scripts
+## Specify a dictionary of scripts to be run at first boot
+## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
+##
+# initdbScripts:
+# my_init_script.sh: |
+# #!/bin/sh
+# echo "Do something."
+
+## ConfigMap with scripts to be run at first boot
+## NOTE: This will override initdbScripts
+# initdbScriptsConfigMap:
+
+## PostgreSQL service configuration
+service:
+ ## PostgreSQL service type
+ type: ClusterIP
+ port: 5432
+
+ ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ # nodePort:
+
+ ## Provide any additional annotations which may be required, for example cloud-provider-specific service annotations.
+ annotations: {}
+ ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ # loadBalancerIP:
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+ enabled: true
+ ## A manually managed Persistent Volume and Claim
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ mountPath: /bitnami/postgresql
+ # storageClass: "-"
+ accessModes:
+ - ReadWriteOnce
+ size: 8Gi
+ annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+ type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 256Mi
+ cpu: 250m
+
+networkPolicy:
+ ## Enable creation of NetworkPolicy resources.
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port PostgreSQL is listening
+ ## on. When true, PostgreSQL will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+ enabled: true
+ # resources: {}
+ service:
+ type: ClusterIP
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9187"
+ loadBalancerIP:
+ image:
+ registry: docker.io
+ repository: wrouesnel/postgres_exporter
+ tag: v0.4.6
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+ ## Configure extra options for liveness and readiness probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+# Define custom environment variables to pass to the image here
+extraEnv: {}
--- /dev/null
+## Global Docker image registry
+### Please note that this will override the image registry for all the images, including dependencies, configured to use the global value
+###
+## global:
+## imageRegistry:
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 10.6.0
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+ ## Set to true if you would like to see extra information on logs
+ ## It turns BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ debug: false
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: latest
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+ ## Init container Security Context
+ securityContext:
+ runAsUser: 0
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+ enabled: true
+ fsGroup: 1001
+ runAsUser: 1001
+
+replication:
+ enabled: false
+ user: repl_user
+ password: repl_password
+ slaveReplicas: 1
+ ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+ ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ synchronousCommit: "off"
+ ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+ ## NOTE: It cannot be > slaveReplicas
+ numSynchronousReplicas: 0
+ ## Replication Cluster application name. Useful for defining multiple replication policies
+ applicationName: my_application
+
+## PostgreSQL admin user
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+postgresqlUsername: postgres
+
+## PostgreSQL password
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+##
+# postgresqlPassword:
+
+## PostgreSQL password using existing secret
+## existingSecret: secret
+
+## Mount PostgreSQL secret as a file instead of passing environment variable
+# usePasswordFile: false
+
+## Create a database
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+##
+# postgresqlDatabase:
+
+## PostgreSQL configuration
+## Specify runtime configuration parameters as a dict, using camelCase, e.g.
+## {"sharedBuffers": "500MB"}
+## Alternatively, you can put your postgresql.conf under the files/ directory
+## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+##
+# postgresqlConfiguration:
+
+## PostgreSQL extended configuration
+## As above, but _appended_ to the main configuration
+## Alternatively, you can put your *.conf under the files/conf.d/ directory
+## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+##
+# postgresqlExtendedConf:
+
+## PostgreSQL client authentication configuration
+## Specify content for pg_hba.conf
+## Default: do not create pg_hba.conf
+## Alternatively, you can put your pg_hba.conf under the files/ directory
+# pgHbaConfiguration: |-
+# local all all trust
+# host all all localhost trust
+# host mydatabase mysuser 192.168.0.0/24 md5
+
+## ConfigMap with PostgreSQL configuration
+## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
+# configurationConfigMap:
+
+## ConfigMap with PostgreSQL extended configuration
+# extendedConfConfigMap:
+
+## initdb scripts
+## Specify a dictionary of scripts to be run at first boot
+## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
+##
+# initdbScripts:
+# my_init_script.sh: |
+# #!/bin/sh
+# echo "Do something."
+#
+## ConfigMap with scripts to be run at first boot
+## NOTE: This will override initdbScripts
+# initdbScriptsConfigMap:
+
+## Optional duration in seconds the pod needs to terminate gracefully.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+# terminationGracePeriodSeconds: 30
+
+## PostgreSQL service configuration
+service:
+ ## PostgreSQL service type
+ type: ClusterIP
+ # clusterIP: None
+ port: 5432
+
+ ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ # nodePort:
+
+ ## Provide any additional annotations which may be required, for example cloud-provider-specific service annotations.
+ annotations: {}
+ ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ # loadBalancerIP:
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+ enabled: true
+ ## A manually managed Persistent Volume and Claim
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ mountPath: /bitnami/postgresql
+ # storageClass: "-"
+ accessModes:
+ - ReadWriteOnce
+ size: 8Gi
+ annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+ type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 256Mi
+ cpu: 250m
+
+networkPolicy:
+ ## Enable creation of NetworkPolicy resources.
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port PostgreSQL is listening
+ ## on. When true, PostgreSQL will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+ enabled: false
+ # resources: {}
+ service:
+ type: ClusterIP
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9187"
+ loadBalancerIP:
+ image:
+ registry: docker.io
+ repository: wrouesnel/postgres_exporter
+ tag: v0.4.6
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+ ## Configure extra options for liveness and readiness probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+# Define custom environment variables to pass to the image here
+extraEnv: {}
--- /dev/null
+env:
+ database: cassandra
+
+cassandra:
+ enabled: true
+postgres:
+ enabled: false
--- /dev/null
+# CI test for testing dbless deployment
+ingressController:
+ enabled: true
+env:
+ database: "off"
+postgresql:
+ enabled: false
--- /dev/null
+# Default values for kong.
+# Declare variables to be passed into your templates.
+
+image:
+ repository: kong
+ # repository: kong-docker-kong-enterprise-edition-docker.bintray.io/kong-enterprise-edition
+ tag: 1.2
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## If using the official Kong Enterprise registry above, you MUST provide a secret.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+waitImage:
+ repository: busybox
+ tag: latest
+
+# Specify Kong admin and proxy services configurations
+admin:
+ # If you want to specify annotations for the admin service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTPS traffic on the admin port
+ # if set to false also set readinessProbe and livenessProbe httpGet scheme's to 'HTTP'
+ useTLS: true
+ servicePort: 8444
+ containerPort: 8444
+ # Kong admin service type
+ type: NodePort
+ # Set a nodePort which is available
+ # nodePort: 32444
+ # Kong admin ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-admin.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+proxy:
+ # If you want to specify annotations for the proxy service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 80
+ containerPort: 8000
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 443
+ containerPort: 8443
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+manager:
+ # If you want to specify annotations for the Manager service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8002
+ containerPort: 8002
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8445
+ containerPort: 8445
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Kong Enterprise Developer Portal GUI service settings.
+# NOTE(review): only consumed when enterprise.enabled and enterprise.portal.enabled
+# are both true — confirm against the service/deployment templates.
+portal:
+ # If you want to specify annotations for the Portal service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8003
+ containerPort: 8003
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8446
+ containerPort: 8446
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Portal GUI ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Kong Enterprise Developer Portal API service settings.
+portalapi:
+ # If you want to specify annotations for the Portal API service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8004
+ containerPort: 8004
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8447
+ containerPort: 8447
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Portal API ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Toggle Kong Enterprise features on or off
+# RBAC and SMTP configuration have additional options that must all be set together
+# Other settings should be added to the "env" settings below
+enterprise:
+ enabled: false
+ # Kong Enterprise license secret name
+ # This secret must contain a single 'license' key, containing your base64-encoded license data
+ # The license secret is required for all Kong Enterprise deployments
+ license_secret: you-must-create-a-kong-license-secret
+ # Session configuration secret
+ # The session conf secret is required if using RBAC or the Portal
+ # (see portal.session_conf_secret and rbac.session_conf_secret below)
+ vitals:
+ enabled: true
+ portal:
+ enabled: false
+ # portal_auth here sets the default authentication mechanism for the Portal
+ # FIXME This can be changed per-workspace, but must currently default to
+ # basic-auth to work around limitations with session configuration
+ portal_auth: basic-auth
+ # If the Portal is enabled and any workspace's Portal uses authentication,
+ # this Secret must contain a portal_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-a-portal-session-conf-secret
+ rbac:
+ enabled: false
+ admin_gui_auth: basic-auth
+ # If RBAC is enabled, this Secret must contain an admin_gui_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-an-rbac-session-conf-secret
+ # Set to the appropriate plugin config JSON if not using basic-auth
+ # admin_gui_auth_conf: ''
+ smtp:
+ enabled: false
+ portal_emails_from: none@example.com
+ portal_emails_reply_to: none@example.com
+ admin_emails_from: none@example.com
+ admin_emails_reply_to: none@example.com
+ smtp_admin_emails: none@example.com
+ smtp_host: smtp.example.com
+ smtp_port: 587
+ smtp_starttls: true
+ auth:
+ # If your SMTP server does not require authentication, this section can
+ # be left as-is. If smtp_username is set to anything other than an empty
+ # string, you must create a Secret with an smtp_password key containing
+ # your SMTP password and specify its name here.
+ smtp_username: '' # e.g. postmaster@example.com
+ smtp_password_secret: you-must-create-an-smtp-password
+
+# Set runMigrations to run Kong migrations
+runMigrations: true
+
+# Specify Kong configurations
+# Kong configurations guide https://getkong.org/docs/latest/configuration/
+# Each key becomes a KONG_<KEY> env var on the Kong containers.
+env:
+ database: postgres
+ proxy_access_log: /dev/stdout
+ admin_access_log: /dev/stdout
+ admin_gui_access_log: /dev/stdout
+ portal_api_access_log: /dev/stdout
+ proxy_error_log: /dev/stderr
+ admin_error_log: /dev/stderr
+ admin_gui_error_log: /dev/stderr
+ portal_api_error_log: /dev/stderr
+
+# If you want to specify resources, uncomment the following
+# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# readinessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+# NOTE(review): scheme HTTPS assumes the admin listener serves TLS
+# (admin.useTLS in the templates) — switch to HTTP if it does not.
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+# livenessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+# Affinity for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+# affinity: {}
+
+# Tolerations for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+# Annotation to be added to Kong pods
+podAnnotations: {}
+
+# Kong pod count
+replicaCount: 1
+
+# Kong has a choice of either Postgres or Cassandra as a backend datastore.
+# This chart allows you to choose either of them with the `env.database`
+# parameter. Postgres is chosen by default.
+
+# Additionally, this chart allows you to use your own database or spin up a new
+# instance by using the `postgresql.enabled` or `cassandra.enabled` parameters.
+# Enabling both will create both databases in your cluster, but only one
+# will be used by Kong based on the `env.database` parameter.
+# Postgres is enabled by default.
+
+# Cassandra chart configs
+cassandra:
+ enabled: false
+
+# PostgreSQL chart configs
+postgresql:
+ enabled: true
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
+
+# Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+# created in k8s. It uses CRDs for more fine grained control over routing and
+# for Kong specific configuration.
+ingressController:
+ enabled: false
+ image:
+ repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+ tag: 0.4.0
+ replicaCount: 1
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+
+ # Install the KongPlugin/KongConsumer/KongCredential/KongIngress CRDs
+ installCRDs: true
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+ # Passed to the controller via --ingress-class
+ ingressClass: kong
--- /dev/null
+# CI test for Ingress controller basic installation
+# Minimal override: everything else comes from the chart's default values.
+ingressController:
+ enabled: true
--- /dev/null
+# CI test for LoadBalancer admin/proxy types
+
+admin:
+ useTLS: true
+ type: LoadBalancer
+ loadBalancerSourceRanges:
+ - 192.168.1.1/32
+ - 10.10.10.10/32
+
+proxy:
+ useTLS: true
+ type: LoadBalancer
+ loadBalancerSourceRanges:
+ - 192.168.1.1/32
+ - 10.10.10.10/32
+
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+postgresql:
+ enabled: true
+ # Use the postgresql subchart's value names (postgresqlUsername /
+ # postgresqlDatabase, as in this chart's default values.yaml); the legacy
+ # postgresUser/postgresDatabase keys are ignored by postgresql ~3.9.x.
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
--- /dev/null
+# Optional datastore subcharts. Each is pulled in only when its *.enabled
+# condition is true; Kong itself selects the one to use via env.database.
+dependencies:
+- name: postgresql
+ version: ~3.9.1
+ repository: https://kubernetes-charts.storage.googleapis.com/
+ condition: postgresql.enabled
+- name: cassandra
+ version: ~0.10.5
+ repository: https://kubernetes-charts-incubator.storage.googleapis.com/
+ condition: cassandra.enabled
--- /dev/null
+1. Kong Admin can be accessed inside the cluster using:
+ DNS={{ template "kong.fullname" . }}-admin.{{ .Release.Namespace }}.svc.cluster.local
+ PORT={{ .Values.admin.servicePort }}
+
+To connect from outside the K8s cluster:
+ {{- if contains "LoadBalancer" .Values.admin.type }}
+ {{- /* status.loadBalancer.ingress is a list, so index it; a LoadBalancer listens on the service port, not the nodePort. */}}
+ HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.spec.ports[0].port}')
+
+ {{- else if contains "NodePort" .Values.admin.type }}
+ HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if .Values.admin.ingress.enabled }}
+
+use one of the addresses listed below
+
+ {{- $path := .Values.admin.ingress.path -}}
+ {{- if .Values.admin.ingress.tls }}
+ {{- range .Values.admin.ingress.hosts }}
+ https://{{ . }}{{ $path }}
+ {{- end }}
+ {{- else }}
+ {{- range .Values.admin.ingress.hosts }}
+ http://{{ . }}{{ $path }}
+ {{- end }}
+ {{- end }}
+
+ {{- else if contains "ClusterIP" .Values.admin.type }}
+ HOST=127.0.0.1
+
+ # Execute the following commands to route the connection to Admin SSL port:
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}, app={{ template "kong.name" . }}" -o jsonpath="{.items[0].metadata.name}")
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.admin.servicePort }}:{{ .Values.admin.servicePort }}
+ {{- end }}
+
+
+2. Kong Proxy can be accessed inside the cluster using:
+ DNS={{ template "kong.fullname" . }}-proxy.{{ .Release.Namespace }}.svc.cluster.local
+ {{- /* No trailing "-" chomps here: they would glue the PORT line onto the DNS line in the rendered notes. */}}
+ {{- if .Values.proxy.tls.enabled }}
+ PORT={{ .Values.proxy.tls.servicePort }}
+ {{- else }}
+ PORT={{ .Values.proxy.http.servicePort }}
+ {{- end }}
+
+
+To connect from outside the K8s cluster:
+ {{- if contains "LoadBalancer" .Values.proxy.type }}
+ HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.spec.ports[0].port}')
+
+ {{- else if contains "NodePort" .Values.proxy.type }}
+ HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if .Values.proxy.ingress.enabled }}
+
+use one of the addresses listed below
+
+ {{- $path := .Values.proxy.ingress.path -}}
+ {{- if .Values.proxy.ingress.tls }}
+ {{- range .Values.proxy.ingress.hosts }}
+ https://{{ . }}{{ $path }}
+ {{- end }}
+ {{- else }}
+ {{- range .Values.proxy.ingress.hosts }}
+ http://{{ . }}{{ $path }}
+ {{- end }}
+ {{- end }}
+
+ {{- else if contains "ClusterIP" .Values.proxy.type }}
+ HOST=127.0.0.1
+
+ # Execute the following commands to route the connection to proxy SSL port:
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}, app={{ template "kong.name" . }}" -o jsonpath="{.items[0].metadata.name}")
+ {{- if .Values.proxy.tls.enabled }}
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.proxy.tls.servicePort }}:{{ .Values.proxy.tls.servicePort }}
+ {{- else }}
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.proxy.http.servicePort }}:{{ .Values.proxy.http.servicePort }}
+ {{- end }}
+ {{- end }}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+
+{{- define "kong.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Release-qualified name ("<release>-<name>") used for most chart resources. */}}
+{{- define "kong.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Fullname of the postgresql subchart's resources (used as PG host and secret name). */}}
+{{- define "kong.postgresql.fullname" -}}
+{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Fullname of the cassandra subchart's resources (used as the contact point). */}}
+{{- define "kong.cassandra.fullname" -}}
+{{- $name := default "cassandra" .Values.cassandra.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+NOTE(review): the else-branch reads .Values.serviceAccount.name, which this
+chart's values.yaml does not define; it is only reached when
+ingressController.serviceAccount.create is false — confirm before relying on it.
+*/}}
+{{- define "kong.serviceAccountName" -}}
+{{- if .Values.ingressController.serviceAccount.create -}}
+ {{ default (include "kong.fullname" .) .Values.ingressController.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the KONG_PROXY_LISTEN value string
+Result: "0.0.0.0:<http port>", "0.0.0.0:<tls port> ssl", or both comma-joined,
+depending on which of proxy.http / proxy.tls are enabled.
+*/}}
+{{- define "kong.kongProxyListenValue" -}}
+
+{{- if and .Values.proxy.http.enabled .Values.proxy.tls.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.http.containerPort }},0.0.0.0:{{ .Values.proxy.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.proxy.http.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.http.containerPort }}
+{{- end -}}
+{{- if .Values.proxy.tls.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_ADMIN_GUI_LISTEN value string
+Same shape as kong.kongProxyListenValue, driven by .Values.manager.*
+*/}}
+{{- define "kong.kongManagerListenValue" -}}
+
+{{- if and .Values.manager.http.enabled .Values.manager.tls.enabled -}}
+ 0.0.0.0:{{ .Values.manager.http.containerPort }},0.0.0.0:{{ .Values.manager.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.manager.http.enabled -}}
+ 0.0.0.0:{{ .Values.manager.http.containerPort }}
+{{- end -}}
+{{- if .Values.manager.tls.enabled -}}
+ 0.0.0.0:{{ .Values.manager.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_PORTAL_GUI_LISTEN value string
+Same shape as kong.kongProxyListenValue, driven by .Values.portal.*
+*/}}
+{{- define "kong.kongPortalListenValue" -}}
+
+{{- if and .Values.portal.http.enabled .Values.portal.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portal.http.containerPort }},0.0.0.0:{{ .Values.portal.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.portal.http.enabled -}}
+ 0.0.0.0:{{ .Values.portal.http.containerPort }}
+{{- end -}}
+{{- if .Values.portal.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portal.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_PORTAL_API_LISTEN value string
+Same shape as kong.kongProxyListenValue, driven by .Values.portalapi.*
+*/}}
+{{- define "kong.kongPortalApiListenValue" -}}
+
+{{- if and .Values.portalapi.http.enabled .Values.portalapi.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.http.containerPort }},0.0.0.0:{{ .Values.portalapi.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.portalapi.http.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.http.containerPort }}
+{{- end -}}
+{{- if .Values.portalapi.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the ingress servicePort value string
+Called with a service sub-map (admin/proxy/portal/portalapi) as the context dot,
+not the chart root: prefers the TLS port when TLS is enabled.
+*/}}
+
+{{- define "kong.ingress.servicePort" -}}
+{{- if .tls.enabled -}}
+ {{ .tls.servicePort }}
+{{- else -}}
+ {{ .http.servicePort }}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Render .Values.env as KONG_<KEY> container env entries: map values are emitted
+as-is (e.g. a valueFrom block), scalars become quoted "value:" fields.
+*/}}
+{{- define "kong.env" -}}
+{{- range $key, $val := .Values.env }}
+- name: KONG_{{ $key | upper}}
+{{- $valueType := printf "%T" $val -}}
+{{ if eq $valueType "map[string]interface {}" }}
+{{ toYaml $val | indent 2 -}}
+{{- else }}
+ value: {{ $val | quote -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Init container that loops "kong start" until it succeeds (i.e. the configured
+datastore is reachable), then stops Kong again so the real containers start clean.
+*/}}
+{{- define "kong.wait-for-db" -}}
+- name: wait-for-db
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ {{- if .Values.enterprise.enabled }}
+ {{- include "kong.license" . | nindent 2 }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ {{- include "kong.env" . | nindent 2 }}
+ command: [ "/bin/sh", "-c", "until kong start; do echo 'waiting for db'; sleep 1; done; kong stop" ]
+{{- end -}}
+
+{{/*
+Container spec for the Kong Ingress Controller, pointed at the local admin API.
+NOTE(review): .Values.ingressController.resources is not set in values.yaml, so
+toYaml renders "null" under resources: — confirm that is acceptable.
+*/}}
+{{- define "kong.controller-container" -}}
+- name: ingress-controller
+ args:
+ - /kong-ingress-controller
+ # Service from where we extract the IP address/es to use in Ingress status
+ - --publish-service={{ .Release.Namespace }}/{{ template "kong.fullname" . }}-proxy
+ # Set the ingress class
+ - --ingress-class={{ .Values.ingressController.ingressClass }}
+ - --election-id=kong-ingress-controller-leader-{{ .Values.ingressController.ingressClass }}
+ # the kong URL points to the kong admin api server
+ {{- if .Values.admin.useTLS }}
+ - --kong-url=https://localhost:{{ .Values.admin.containerPort }}
+ - --admin-tls-skip-verify # TODO make this configurable
+ {{- else }}
+ - --kong-url=http://localhost:{{ .Values.admin.containerPort }}
+ {{- end }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: "{{ .Values.ingressController.image.repository }}:{{ .Values.ingressController.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ resources:
+{{ toYaml .Values.ingressController.resources | indent 10 }}
+{{- end -}}
+
+{{/*
+Retrieve Kong Enterprise license from a secret and make it available in env vars
+*/}}
+{{- define "kong.license" -}}
+- name: KONG_LICENSE_DATA
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.enterprise.license_secret }}
+ key: license
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+# Cluster-wide permissions for the ingress controller: read core objects and
+# Ingresses, publish Events, update Ingress status, and read the Kong CRDs.
+# NOTE(review): rbac.authorization.k8s.io/v1beta1 is removed in Kubernetes 1.22+.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ template "kong.fullname" . }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "extensions"
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - "extensions"
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - "configuration.konghq.com"
+ resources:
+ - kongplugins
+ - kongcredentials
+ - kongconsumers
+ - kongingresses
+ verbs:
+ - get
+ - list
+ - watch
+{{- end -}}
--- /dev/null
+{{- if (and (.Values.ingressController.enabled) (not (eq .Values.env.database "off"))) }}
+# Standalone Deployment pairing the ingress controller with a dedicated Kong
+# admin-api container. Only rendered when the controller is enabled and a
+# database backs Kong; in DB-less mode the controller runs as a sidecar of the
+# main Kong Deployment instead.
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: "{{ template "kong.fullname" . }}-controller"
+ labels:
+ app: "{{ template "kong.name" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: "controller"
+spec:
+ replicas: {{ .Values.ingressController.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: "controller"
+ template:
+ metadata:
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ labels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: "controller"
+ spec:
+ serviceAccountName: {{ template "kong.serviceAccountName" . }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- include "kong.wait-for-db" . | nindent 6 }}
+ containers:
+ - name: admin-api
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KONG_PROXY_LISTEN
+ value: 'off'
+ {{- if .Values.enterprise.enabled }}
+ {{- if .Values.enterprise.rbac.enabled }}
+ # TODO: uncomment this once we have a means of securely providing the
+ # controller its token using a secret.
+ #- name: KONG_ENFORCE_RBAC
+ # value: "on"
+ {{- end }}
+ # the controller admin API should not receive requests to create admins or developers
+ # never enable SMTP on it as such: mock it unconditionally, regardless of
+ # enterprise.smtp.enabled (the old if/else set the same value in both branches)
+ - name: KONG_SMTP_MOCK
+ value: "on"
+ {{- include "kong.license" . | nindent 8 }}
+ {{- end }}
+ {{- include "kong.env" . | indent 8 }}
+ {{- if .Values.admin.useTLS }}
+ - name: KONG_ADMIN_LISTEN
+ value: "0.0.0.0:{{ .Values.admin.containerPort }} ssl"
+ {{- else }}
+ - name: KONG_ADMIN_LISTEN
+ value: 0.0.0.0:{{ .Values.admin.containerPort }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ ports:
+ - name: admin
+ containerPort: {{ .Values.admin.containerPort }}
+ protocol: TCP
+ readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 10 }}
+ livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ {{- include "kong.controller-container" . | nindent 6 }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+# Binds the chart's ClusterRole (same fullname) to the controller ServiceAccount.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "kong.fullname" . }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "kong.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "kong.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+# Binds the chart's namespaced Role (same fullname) to the controller ServiceAccount.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: {{ template "kong.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "kong.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "kong.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+# Namespaced permissions for the controller: leader-election ConfigMap access
+# plus reads of local ConfigMaps/Pods/Secrets/Endpoints.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: {{ template "kong.fullname" . }}
+ # Built-in object key is capitalized: .Release.Namespace. The lowercase
+ # ".Release.namespace" does not exist and corrupts the namespace field.
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ resourceNames:
+ # Defaults to "<election-id>-<ingress-class>"
+ # Here: "<kong-ingress-controller-leader-nginx>-<nginx>"
+ # This has to be adapted if you change either parameter
+ # when launching the nginx-ingress-controller.
+ - "kong-ingress-controller-leader-{{ .Values.ingressController.ingressClass }}-{{ .Values.ingressController.ingressClass }}"
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.serviceAccount.create -}}
+# ServiceAccount the ingress controller pods run as.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "kong.serviceAccountName" . }}
+ # Built-in object key is capitalized: .Release.Namespace. The lowercase
+ # ".Release.namespace" does not exist and corrupts the namespace field.
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+# KongConsumer custom resource, read by the ingress controller.
+# NOTE(review): apiextensions.k8s.io/v1beta1 is removed in Kubernetes 1.22+;
+# acceptable for the older clusters this chart targets.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kongconsumers.configuration.konghq.com
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ group: configuration.konghq.com
+ version: v1
+ scope: Namespaced
+ names:
+ kind: KongConsumer
+ plural: kongconsumers
+ shortNames:
+ - kc
+ additionalPrinterColumns:
+ - name: Username
+ type: string
+ description: Username of a Kong Consumer
+ JSONPath: .username
+ - name: Age
+ type: date
+ description: Age
+ JSONPath: .metadata.creationTimestamp
+ validation:
+ openAPIV3Schema:
+ properties:
+ username:
+ type: string
+ custom_id:
+ type: string
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+# KongCredential custom resource: a credential of some type owned by a consumer.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kongcredentials.configuration.konghq.com
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ group: configuration.konghq.com
+ version: v1
+ scope: Namespaced
+ names:
+ kind: KongCredential
+ plural: kongcredentials
+ additionalPrinterColumns:
+ - name: Credential-type
+ type: string
+ description: Type of credential
+ JSONPath: .type
+ - name: Age
+ type: date
+ description: Age
+ JSONPath: .metadata.creationTimestamp
+ - name: Consumer-Ref
+ type: string
+ description: Owner of the credential
+ JSONPath: .consumerRef
+ validation:
+ openAPIV3Schema:
+ required:
+ - consumerRef
+ - type
+ properties:
+ consumerRef:
+ type: string
+ type:
+ type: string
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+# KongIngress custom resource: fine-grained overrides for Kong route/service/
+# upstream settings attached to a Kubernetes Ingress.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kongingresses.configuration.konghq.com
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ group: configuration.konghq.com
+ version: v1
+ scope: Namespaced
+ names:
+ kind: KongIngress
+ plural: kongingresses
+ shortNames:
+ - ki
+ validation:
+ openAPIV3Schema:
+ properties:
+ upstream:
+ type: object
+ route:
+ properties:
+ methods:
+ type: array
+ items:
+ type: string
+ regex_priority:
+ type: integer
+ strip_path:
+ type: boolean
+ preserve_host:
+ type: boolean
+ protocols:
+ type: array
+ items:
+ type: string
+ enum:
+ - http
+ - https
+ proxy:
+ type: object
+ properties:
+ protocol:
+ type: string
+ enum:
+ - http
+ - https
+ path:
+ type: string
+ pattern: ^/.*$
+ retries:
+ type: integer
+ minimum: 0
+ connect_timeout:
+ type: integer
+ minimum: 0
+ read_timeout:
+ type: integer
+ minimum: 0
+ write_timeout:
+ type: integer
+ minimum: 0
+ upstream:
+ type: object
+ properties:
+ hash_on:
+ type: string
+ hash_on_cookie:
+ type: string
+ hash_on_cookie_path:
+ type: string
+ hash_on_header:
+ type: string
+ hash_fallback_header:
+ type: string
+ hash_fallback:
+ type: string
+ slots:
+ type: integer
+ minimum: 10
+ healthchecks:
+ type: object
+ properties:
+ active:
+ type: object
+ properties:
+ concurrency:
+ type: integer
+ minimum: 1
+ timeout:
+ type: integer
+ minimum: 0
+ http_path:
+ type: string
+ pattern: ^/.*$
+ # YAML anchors: the healthy/unhealthy schemas are reused verbatim
+ # under "passive:" below via *healthy / *unhealthy.
+ healthy: &healthy
+ type: object
+ properties:
+ http_statuses:
+ type: array
+ items:
+ type: integer
+ interval:
+ type: integer
+ minimum: 0
+ successes:
+ type: integer
+ minimum: 0
+ unhealthy: &unhealthy
+ type: object
+ properties:
+ http_failures:
+ type: integer
+ minimum: 0
+ http_statuses:
+ type: array
+ items:
+ type: integer
+ interval:
+ type: integer
+ minimum: 0
+ tcp_failures:
+ type: integer
+ minimum: 0
+ timeout:
+ type: integer
+ minimum: 0
+ passive:
+ type: object
+ properties:
+ healthy: *healthy
+ unhealthy: *unhealthy
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+# KongPlugin custom resource: a plugin instance (name + config) to apply in Kong.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kongplugins.configuration.konghq.com
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ group: configuration.konghq.com
+ version: v1
+ scope: Namespaced
+ names:
+ kind: KongPlugin
+ plural: kongplugins
+ shortNames:
+ - kp
+ additionalPrinterColumns:
+ - name: Plugin-Type
+ type: string
+ description: Name of the plugin
+ JSONPath: .plugin
+ - name: Age
+ type: date
+ description: Age
+ JSONPath: .metadata.creationTimestamp
+ - name: Disabled
+ type: boolean
+ description: Indicates if the plugin is disabled
+ JSONPath: .disabled
+ priority: 1
+ - name: Config
+ type: string
+ description: Configuration of the plugin
+ JSONPath: .config
+ priority: 1
+ validation:
+ openAPIV3Schema:
+ required:
+ - plugin
+ properties:
+ plugin:
+ type: string
+ disabled:
+ type: boolean
+ config:
+ type: object
+{{- end -}}
--- /dev/null
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: "{{ template "kong.fullname" . }}"
+ labels:
+ app: "{{ template "kong.name" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: app
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: app
+ template:
+ metadata:
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ labels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: app
+ spec:
+ {{- if (and (.Values.ingressController) (eq .Values.env.database "off")) }}
+ serviceAccountName: {{ template "kong.serviceAccountName" . }}
+ {{ end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if not (eq .Values.env.database "off") }}
+ initContainers:
+ {{- include "kong.wait-for-db" . | nindent 6 }}
+ {{ end }}
+ containers:
+ {{- if (and (.Values.ingressController) (eq .Values.env.database "off")) }}
+ {{- include "kong.controller-container" . | nindent 6 }}
+ {{ end }}
+ - name: {{ template "kong.name" . }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ {{- if not .Values.env.admin_listen }}
+ {{- if .Values.admin.useTLS }}
+ - name: KONG_ADMIN_LISTEN
+ value: "0.0.0.0:{{ .Values.admin.containerPort }} ssl"
+ {{- else }}
+ - name: KONG_ADMIN_LISTEN
+ value: 0.0.0.0:{{ .Values.admin.containerPort }}
+ {{- end }}
+ {{- end }}
+ {{- if not .Values.env.proxy_listen }}
+ - name: KONG_PROXY_LISTEN
+ value: {{ template "kong.kongProxyListenValue" . }}
+ {{- end }}
+ {{- if and (not .Values.env.admin_gui_listen) (.Values.enterprise.enabled) }}
+ - name: KONG_ADMIN_GUI_LISTEN
+ value: {{ template "kong.kongManagerListenValue" . }}
+ {{- end }}
+ {{- if and (not .Values.env.portal_gui_listen) (.Values.enterprise.enabled) (.Values.enterprise.portal.enabled) }}
+ - name: KONG_PORTAL_GUI_LISTEN
+ value: {{ template "kong.kongPortalListenValue" . }}
+ {{- end }}
+ {{- if and (not .Values.env.portal_api_listen) (.Values.enterprise.enabled) (.Values.enterprise.portal.enabled) }}
+ - name: KONG_PORTAL_API_LISTEN
+ value: {{ template "kong.kongPortalApiListenValue" . }}
+ {{- end }}
+ - name: KONG_NGINX_DAEMON
+ value: "off"
+ {{- if .Values.enterprise.enabled }}
+ {{- if .Values.enterprise.vitals.enabled }}
+ - name: KONG_VITALS
+ value: "on"
+ {{- end }}
+ {{- if .Values.enterprise.portal.enabled }}
+ - name: KONG_PORTAL
+ value: "on"
+ {{- if .Values.enterprise.portal.portal_auth }}
+ - name: KONG_PORTAL_AUTH
+ value: {{ .Values.enterprise.portal.portal_auth }}
+ - name: KONG_PORTAL_SESSION_CONF
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.enterprise.portal.session_conf_secret }}
+ key: portal_session_conf
+ {{- end }}
+ {{- end }}
+        {{- if .Values.enterprise.rbac.enabled }}
+        - name: KONG_ENFORCE_RBAC
+          value: "on"
+        - name: KONG_ADMIN_GUI_AUTH
+          value: {{ .Values.enterprise.rbac.admin_gui_auth | default "basic-auth" }}
+        - name: KONG_ADMIN_GUI_AUTH_CONF
+          # Single-quote the rendered JSON so YAML treats it as a string
+          # scalar; unquoted {"k":"v"} would parse as a flow mapping, which
+          # is invalid for an env var value.
+          value: '{{ toJson .Values.enterprise.rbac.admin_gui_auth_conf | default "" }}'
+        - name: KONG_ADMIN_GUI_SESSION_CONF
+          valueFrom:
+            secretKeyRef:
+              name: {{ .Values.enterprise.rbac.session_conf_secret }}
+              key: admin_gui_session_conf
+        {{- end }}
+        {{- if .Values.enterprise.smtp.enabled }}
+        - name: KONG_PORTAL_EMAILS_FROM
+          value: {{ .Values.enterprise.smtp.portal_emails_from }}
+        - name: KONG_PORTAL_EMAILS_REPLY_TO
+          value: {{ .Values.enterprise.smtp.portal_emails_reply_to }}
+        - name: KONG_ADMIN_EMAILS_FROM
+          value: {{ .Values.enterprise.smtp.admin_emails_from }}
+        - name: KONG_ADMIN_EMAILS_REPLY_TO
+          value: {{ .Values.enterprise.smtp.admin_emails_reply_to }}
+        - name: KONG_SMTP_HOST
+          value: {{ .Values.enterprise.smtp.smtp_host }}
+        - name: KONG_SMTP_PORT
+          # smtp_port is an integer in values.yaml; env values must be strings.
+          value: "{{ .Values.enterprise.smtp.smtp_port }}"
+        - name: KONG_SMTP_STARTTLS
+          # smtp_starttls is a boolean in values.yaml; quote it for the API.
+          value: "{{ .Values.enterprise.smtp.smtp_starttls }}"
+        {{- if .Values.enterprise.smtp.auth.smtp_username }}
+        - name: KONG_SMTP_USERNAME
+          value: {{ .Values.enterprise.smtp.auth.smtp_username }}
+        - name: KONG_SMTP_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              # values.yaml declares the secret name under
+              # enterprise.smtp.auth.smtp_password_secret;
+              # auth.smtp_password does not exist there.
+              name: {{ .Values.enterprise.smtp.auth.smtp_password_secret }}
+              key: smtp_password
+        {{- end }}
+        {{- else }}
+        - name: KONG_SMTP_MOCK
+          value: "on"
+        {{- end }}
+ {{- include "kong.license" . | nindent 8 }}
+ {{- end }}
+ {{- include "kong.env" . | indent 8 }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ ports:
+ - name: admin
+ containerPort: {{ .Values.admin.containerPort }}
+ {{- if .Values.admin.hostPort }}
+ hostPort: {{ .Values.admin.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- if .Values.proxy.http.enabled }}
+ - name: proxy
+ containerPort: {{ .Values.proxy.http.containerPort }}
+ {{- if .Values.proxy.http.hostPort }}
+ hostPort: {{ .Values.proxy.http.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.proxy.tls.enabled }}
+ - name: proxy-tls
+ containerPort: {{ .Values.proxy.tls.containerPort }}
+ {{- if .Values.proxy.tls.hostPort }}
+ hostPort: {{ .Values.proxy.tls.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.enterprise.enabled }}
+ {{- if .Values.manager.http.enabled }}
+ - name: manager
+ containerPort: {{ .Values.manager.http.containerPort }}
+ {{- if .Values.manager.http.hostPort }}
+ hostPort: {{ .Values.manager.http.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.manager.tls.enabled }}
+ - name: manager-tls
+ containerPort: {{ .Values.manager.tls.containerPort }}
+ {{- if .Values.manager.tls.hostPort }}
+ hostPort: {{ .Values.manager.tls.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.portal.http.enabled }}
+ - name: portal
+ containerPort: {{ .Values.portal.http.containerPort }}
+ {{- if .Values.portal.http.hostPort }}
+ hostPort: {{ .Values.portal.http.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.portal.tls.enabled }}
+ - name: portal-tls
+ containerPort: {{ .Values.portal.tls.containerPort }}
+ {{- if .Values.portal.tls.hostPort }}
+ hostPort: {{ .Values.portal.tls.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.portalapi.http.enabled }}
+ - name: portalapi
+ containerPort: {{ .Values.portalapi.http.containerPort }}
+ {{- if .Values.portalapi.http.hostPort }}
+ hostPort: {{ .Values.portalapi.http.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.portalapi.tls.enabled }}
+ - name: portalapi-tls
+ containerPort: {{ .Values.portalapi.tls.containerPort }}
+ {{- if .Values.portalapi.tls.hostPort }}
+ hostPort: {{ .Values.portalapi.tls.hostPort }}
+ {{- end}}
+ protocol: TCP
+ {{- end }}
+ {{- end }}
+ readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 10 }}
+ livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
--- /dev/null
+{{- /*
+Ingress exposing the Kong Admin API Service ("<fullname>-admin").
+Rendered only when admin.ingress.enabled is true.
+*/ -}}
+{{- if .Values.admin.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := .Values.admin.servicePort -}}
+{{- $path := .Values.admin.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ template "kong.fullname" . }}-admin
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ {{- range $key, $value := .Values.admin.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.admin.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: {{ $path }}
+ backend:
+ serviceName: {{ $serviceName }}-admin
+ servicePort: {{ $servicePort }}
+ {{- end -}}
+ {{- if .Values.admin.ingress.tls }}
+ tls:
+{{ toYaml .Values.admin.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
\ No newline at end of file
--- /dev/null
+{{- /*
+Ingress exposing Kong Manager ("<fullname>-manager"). Rendered only for
+Kong Enterprise (enterprise.enabled) when manager.ingress.enabled is true.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.manager.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.manager -}}
+{{- $path := .Values.manager.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ template "kong.fullname" . }}-manager
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ {{- range $key, $value := .Values.manager.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.manager.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: {{ $path }}
+ backend:
+ serviceName: {{ $serviceName }}-manager
+ servicePort: {{ $servicePort }}
+ {{- end -}}
+ {{- if .Values.manager.ingress.tls }}
+ tls:
+{{ toYaml .Values.manager.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Ingress exposing the Kong Developer Portal API ("<fullname>-portalapi").
+Rendered only for Kong Enterprise when portalapi.ingress.enabled is true.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.portalapi.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.portalapi -}}
+{{- $path := .Values.portalapi.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ template "kong.fullname" . }}-portalapi
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ {{- range $key, $value := .Values.portalapi.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.portalapi.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: {{ $path }}
+ backend:
+ serviceName: {{ $serviceName }}-portalapi
+ servicePort: {{ $servicePort }}
+ {{- end -}}
+ {{- if .Values.portalapi.ingress.tls }}
+ tls:
+{{ toYaml .Values.portalapi.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Ingress exposing the Kong Developer Portal GUI ("<fullname>-portal").
+Rendered only for Kong Enterprise when portal.ingress.enabled is true.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.portal.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.portal -}}
+{{- $path := .Values.portal.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ template "kong.fullname" . }}-portal
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ {{- range $key, $value := .Values.portal.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.portal.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: {{ $path }}
+ backend:
+ serviceName: {{ $serviceName }}-portal
+ servicePort: {{ $servicePort }}
+ {{- end -}}
+ {{- if .Values.portal.ingress.tls }}
+ tls:
+{{ toYaml .Values.portal.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Ingress exposing the Kong proxy (data plane, "<fullname>-proxy").
+Rendered only when proxy.ingress.enabled is true.
+*/ -}}
+{{- if .Values.proxy.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.proxy -}}
+{{- $path := .Values.proxy.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ template "kong.fullname" . }}-proxy
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ {{- range $key, $value := .Values.proxy.ingress.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.proxy.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: {{ $path }}
+ backend:
+ serviceName: {{ $serviceName }}-proxy
+ servicePort: {{ $servicePort }}
+ {{- end -}}
+ {{- if .Values.proxy.ingress.tls }}
+ tls:
+{{ toYaml .Values.proxy.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+post-upgrade hook Job: runs "kong migrations finish" after an upgrade, once
+the datastore is reachable. Skipped in DB-less mode (env.database == "off")
+or when runMigrations is false.
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+# Why is this Job duplicated and not using only helm hooks?
+# See: https://github.com/helm/charts/pull/7362
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ template "kong.fullname" . }}-post-upgrade-migrations
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: post-upgrade-migrations
+ annotations:
+ helm.sh/hook: "post-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+spec:
+ template:
+ metadata:
+ name: {{ template "kong.name" . }}-post-upgrade-migrations
+ labels:
+ app: {{ template "kong.name" . }}
+ release: "{{ .Release.Name }}"
+ component: post-upgrade-migrations
+ spec:
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ initContainers:
+ - name: wait-for-postgres
+ image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+ env:
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+ {{- end }}
+ containers:
+ - name: {{ template "kong.name" . }}-post-upgrade-migrations
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KONG_NGINX_DAEMON
+ value: "off"
+ {{- if .Values.enterprise.enabled }}
+ {{- include "kong.license" . | nindent 8 }}
+ {{- end }}
+ {{- include "kong.env" . | indent 8 }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ command: [ "/bin/sh", "-c", "kong migrations finish" ]
+ restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+pre-upgrade hook Job: runs "kong migrations up" (non-destructive steps)
+before an upgrade; the matching post-upgrade Job runs "kong migrations
+finish". Skipped in DB-less mode or when runMigrations is false.
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+# Why is this Job duplicated and not using only helm hooks?
+# See: https://github.com/helm/charts/pull/7362
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "kong.fullname" . }}-pre-upgrade-migrations
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    component: pre-upgrade-migrations
+  annotations:
+    helm.sh/hook: "pre-upgrade"
+    helm.sh/hook-delete-policy: "before-hook-creation"
+spec:
+  template:
+    metadata:
+      name: {{ template "kong.name" . }}-pre-upgrade-migrations
+      labels:
+        app: {{ template "kong.name" . }}
+        release: "{{ .Release.Name }}"
+        component: pre-upgrade-migrations
+    spec:
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+      - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      {{- if .Values.postgresql.enabled }}
+      # Block until PostgreSQL accepts TCP connections so the migration
+      # command does not fail on a not-yet-ready database.
+      initContainers:
+      - name: wait-for-postgres
+        image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+        env:
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+      {{- end }}
+      containers:
+      # Container name aligned with the Job/component name
+      # ("pre-upgrade-migrations"), matching the post-upgrade Job's convention.
+      - name: {{ template "kong.name" . }}-pre-upgrade-migrations
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        env:
+        - name: KONG_NGINX_DAEMON
+          value: "off"
+        {{- if .Values.enterprise.enabled }}
+        {{- include "kong.license" . | nindent 8 }}
+        {{- end }}
+        {{- include "kong.env" . | indent 8 }}
+        {{- if .Values.postgresql.enabled }}
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        {{- end }}
+        {{- if .Values.cassandra.enabled }}
+        - name: KONG_CASSANDRA_CONTACT_POINTS
+          value: {{ template "kong.cassandra.fullname" . }}
+        {{- end }}
+        command: [ "/bin/sh", "-c", "kong migrations up" ]
+      restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+Initial-install Job: runs "kong migrations bootstrap" to create the Kong
+schema. Skipped in DB-less mode or when runMigrations is false.
+NOTE(review): container is named "<name>-migrations" while the Job and
+component label use "init-migrations" — confirm whether this should match.
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ template "kong.fullname" . }}-init-migrations
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: init-migrations
+spec:
+ template:
+ metadata:
+ name: {{ template "kong.name" . }}-init-migrations
+ labels:
+ app: {{ template "kong.name" . }}
+ release: "{{ .Release.Name }}"
+ component: init-migrations
+ spec:
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ initContainers:
+ - name: wait-for-postgres
+ image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+ env:
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+ {{- end }}
+ containers:
+ - name: {{ template "kong.name" . }}-migrations
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KONG_NGINX_DAEMON
+ value: "off"
+ {{- if .Values.enterprise.enabled }}
+ {{- include "kong.license" . | nindent 8 }}
+ {{- end }}
+ {{- include "kong.env" . | indent 8 }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ command: [ "/bin/sh", "-c", "kong migrations bootstrap" ]
+ restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+Service exposing the Kong Admin API on a single port (TLS controlled by
+admin.useTLS in the Deployment, not here).
+*/ -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "kong.fullname" . }}-admin
+ annotations:
+ {{- range $key, $value := .Values.admin.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ type: {{ .Values.admin.type }}
+ {{- if eq .Values.admin.type "LoadBalancer" }}
+ {{- if .Values.admin.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.admin.loadBalancerIP }}
+ {{- end }}
+ {{- if .Values.admin.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.admin.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: kong-admin
+ port: {{ .Values.admin.servicePort }}
+ targetPort: {{ .Values.admin.containerPort }}
+ {{- if (and (eq .Values.admin.type "NodePort") (not (empty .Values.admin.nodePort))) }}
+ nodePort: {{ .Values.admin.nodePort }}
+ {{- end }}
+ protocol: TCP
+ selector:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: app
--- /dev/null
+{{- /*
+Service exposing the Kong Manager (admin GUI) HTTP/TLS listeners.
+Rendered only for Kong Enterprise (enterprise.enabled).
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-manager
+  annotations:
+    {{- range $key, $value := .Values.manager.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.manager.type }}
+  {{- if eq .Values.manager.type "LoadBalancer" }}
+  {{- if .Values.manager.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.manager.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.manager.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.manager.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  {{- /* Emit externalIPs only when configured; an unconditional key would
+         render "externalIPs:" with a null value when the list is empty. */}}
+  {{- if .Values.manager.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.manager.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.manager.http.enabled }}
+  - name: kong-manager
+    port: {{ .Values.manager.http.servicePort }}
+    targetPort: {{ .Values.manager.http.containerPort }}
+    {{- if (and (eq .Values.manager.type "NodePort") (not (empty .Values.manager.http.nodePort))) }}
+    nodePort: {{ .Values.manager.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- if .Values.manager.tls.enabled }}
+  - name: kong-manager-tls
+    port: {{ .Values.manager.tls.servicePort }}
+    targetPort: {{ .Values.manager.tls.containerPort }}
+    {{- if (and (eq .Values.manager.type "NodePort") (not (empty .Values.manager.tls.nodePort))) }}
+    nodePort: {{ .Values.manager.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+{{- /*
+Service exposing the Kong Developer Portal API HTTP/TLS listeners.
+Rendered only for Kong Enterprise (enterprise.enabled).
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-portalapi
+  annotations:
+    {{- range $key, $value := .Values.portalapi.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.portalapi.type }}
+  {{- if eq .Values.portalapi.type "LoadBalancer" }}
+  {{- if .Values.portalapi.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.portalapi.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.portalapi.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.portalapi.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  {{- /* Emit externalIPs only when configured; an unconditional key would
+         render "externalIPs:" with a null value when the list is empty. */}}
+  {{- if .Values.portalapi.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.portalapi.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.portalapi.http.enabled }}
+  - name: kong-portalapi
+    port: {{ .Values.portalapi.http.servicePort }}
+    targetPort: {{ .Values.portalapi.http.containerPort }}
+    {{- if (and (eq .Values.portalapi.type "NodePort") (not (empty .Values.portalapi.http.nodePort))) }}
+    nodePort: {{ .Values.portalapi.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- if .Values.portalapi.tls.enabled }}
+  - name: kong-portalapi-tls
+    port: {{ .Values.portalapi.tls.servicePort }}
+    targetPort: {{ .Values.portalapi.tls.containerPort }}
+    {{- if (and (eq .Values.portalapi.type "NodePort") (not (empty .Values.portalapi.tls.nodePort))) }}
+    nodePort: {{ .Values.portalapi.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+{{- /*
+Service exposing the Kong Developer Portal GUI HTTP/TLS listeners.
+Rendered only for Kong Enterprise (enterprise.enabled).
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-portal
+  annotations:
+    {{- range $key, $value := .Values.portal.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.portal.type }}
+  {{- if eq .Values.portal.type "LoadBalancer" }}
+  {{- if .Values.portal.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.portal.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.portal.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.portal.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  {{- /* Emit externalIPs only when configured; an unconditional key would
+         render "externalIPs:" with a null value when the list is empty. */}}
+  {{- if .Values.portal.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.portal.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.portal.http.enabled }}
+  - name: kong-portal
+    port: {{ .Values.portal.http.servicePort }}
+    targetPort: {{ .Values.portal.http.containerPort }}
+    {{- if (and (eq .Values.portal.type "NodePort") (not (empty .Values.portal.http.nodePort))) }}
+    nodePort: {{ .Values.portal.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- if .Values.portal.tls.enabled }}
+  - name: kong-portal-tls
+    port: {{ .Values.portal.tls.servicePort }}
+    targetPort: {{ .Values.portal.tls.containerPort }}
+    {{- if (and (eq .Values.portal.type "NodePort") (not (empty .Values.portal.tls.nodePort))) }}
+    nodePort: {{ .Values.portal.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "kong.fullname" . }}-proxy
+ annotations:
+ {{- range $key, $value := .Values.proxy.annotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ type: {{ .Values.proxy.type }}
+ {{- if eq .Values.proxy.type "LoadBalancer" }}
+ {{- if .Values.proxy.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.proxy.loadBalancerIP }}
+ {{- end }}
+ {{- if .Values.proxy.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.proxy.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ externalIPs:
+ {{- range $ip := .Values.proxy.externalIPs }}
+ - {{ $ip }}
+ {{- end }}
+ ports:
+ {{- if .Values.proxy.http.enabled }}
+ - name: kong-proxy
+ port: {{ .Values.proxy.http.servicePort }}
+ targetPort: {{ .Values.proxy.http.containerPort }}
+ {{- if (and (eq .Values.proxy.type "NodePort") (not (empty .Values.proxy.http.nodePort))) }}
+ nodePort: {{ .Values.proxy.http.nodePort }}
+ {{- end }}
+ protocol: TCP
+ {{- end }}
+ {{- if or .Values.proxy.tls.enabled }}
+ - name: kong-proxy-tls
+ port: {{ .Values.proxy.tls.servicePort }}
+ targetPort: {{ .Values.proxy.tls.containerPort }}
+ {{- if (and (eq .Values.proxy.type "NodePort") (not (empty .Values.proxy.tls.nodePort))) }}
+ nodePort: {{ .Values.proxy.tls.nodePort }}
+ {{- end }}
+ protocol: TCP
+ {{- end }}
+ {{- if .Values.proxy.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.proxy.externalTrafficPolicy }}
+ {{- end }}
+
+ selector:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: app
--- /dev/null
+# Default values for kong.
+# Declare variables to be passed into your templates.
+
+image:
+ repository: kong
+ # repository: kong-docker-kong-enterprise-edition-docker.bintray.io/kong-enterprise-edition
+ tag: 1.2
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## If using the official Kong Enterprise registry above, you MUST provide a secret.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+
+waitImage:
+ repository: busybox
+ tag: latest
+
+# Specify Kong admin and proxy services configurations
+admin:
+ # If you want to specify annotations for the admin service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTPS traffic on the admin port
+ # if set to false also set readinessProbe and livenessProbe httpGet scheme's to 'HTTP'
+ useTLS: true
+ servicePort: 8444
+ containerPort: 8444
+ # Kong admin service type
+ type: NodePort
+ # Set a nodePort which is available
+ # nodePort: 32444
+ # Kong admin ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-admin.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+proxy:
+ # If you want to specify annotations for the proxy service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 80
+ containerPort: 8000
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 443
+ containerPort: 8443
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+manager:
+ # If you want to specify annotations for the Manager service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8002
+ containerPort: 8002
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8445
+ containerPort: 8445
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong Manager ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-manager.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portal:
+ # If you want to specify annotations for the Portal service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8003
+ containerPort: 8003
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8446
+ containerPort: 8446
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong Developer Portal ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-portal.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portalapi:
+ # If you want to specify annotations for the Portal API service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8004
+ containerPort: 8004
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8447
+ containerPort: 8447
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong Portal API ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-portalapi.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Toggle Kong Enterprise features on or off
+# RBAC and SMTP configuration have additional options that must all be set together
+# Other settings should be added to the "env" settings below
+enterprise:
+ enabled: false
+ # Kong Enterprise license secret name
+ # This secret must contain a single 'license' key, containing your base64-encoded license data
+ # The license secret is required for all Kong Enterprise deployments
+ license_secret: you-must-create-a-kong-license-secret
+ # Session configuration secret
+ # The session conf secret is required if using RBAC or the Portal
+ vitals:
+ enabled: true
+ portal:
+ enabled: false
+ # portal_auth here sets the default authentication mechanism for the Portal
+ # FIXME This can be changed per-workspace, but must currently default to
+ # basic-auth to work around limitations with session configuration
+ portal_auth: basic-auth
+ # If the Portal is enabled and any workspace's Portal uses authentication,
+ # this Secret must contain a portal_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-a-portal-session-conf-secret
+ rbac:
+ enabled: false
+ admin_gui_auth: basic-auth
+ # If RBAC is enabled, this Secret must contain an admin_gui_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-an-rbac-session-conf-secret
+ # Set to the appropriate plugin config JSON if not using basic-auth
+ # admin_gui_auth_conf: ''
+ smtp:
+ enabled: false
+ portal_emails_from: none@example.com
+ portal_emails_reply_to: none@example.com
+ admin_emails_from: none@example.com
+ admin_emails_reply_to: none@example.com
+ smtp_admin_emails: none@example.com
+ smtp_host: smtp.example.com
+ smtp_port: 587
+ smtp_starttls: true
+ auth:
+ # If your SMTP server does not require authentication, this section can
+ # be left as-is. If smtp_username is set to anything other than an empty
+ # string, you must create a Secret with an smtp_password key containing
+ # your SMTP password and specify its name here.
+ smtp_username: '' # e.g. postmaster@example.com
+ smtp_password_secret: you-must-create-an-smtp-password
+
+# Set runMigrations to run Kong migrations
+runMigrations: true
+
+# Specify Kong configurations
+# Kong configurations guide https://getkong.org/docs/latest/configuration/
+env:
+ database: postgres
+ proxy_access_log: /dev/stdout
+ admin_access_log: /dev/stdout
+ admin_gui_access_log: /dev/stdout
+ portal_api_access_log: /dev/stdout
+ proxy_error_log: /dev/stderr
+ admin_error_log: /dev/stderr
+ admin_gui_error_log: /dev/stderr
+ portal_api_error_log: /dev/stderr
+
+# If you want to specify resources, uncomment the following
+# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# readinessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+# livenessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+# Affinity for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+# affinity: {}
+
+# Tolerations for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+# Annotation to be added to Kong pods
+podAnnotations: {}
+
+# Kong pod count
+replicaCount: 1
+
+# Kong has a choice of either Postgres or Cassandra as a backend datastore.
+# This chart allows you to choose either of them with the `database.type`
+# parameter. Postgres is chosen by default.
+
+# Additionally, this chart allows you to use your own database or spin up a new
+# instance by using the `postgres.enabled` or `cassandra.enabled` parameters.
+# Enabling both will create both databases in your cluster, but only one
+# will be used by Kong based on the `env.database` parameter.
+# Postgres is enabled by default.
+
+# Cassandra chart configs
+cassandra:
+ enabled: false
+
+# PostgreSQL chart configs
+postgresql:
+ enabled: true
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
+
+# Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+# created in k8s. It uses CRDs for more fine grained control over routing and
+# for Kong specific configuration.
+ingressController:
+ enabled: false
+ image:
+ repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+ tag: 0.4.0
+ replicaCount: 1
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+
+ installCRDs: true
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+ ingressClass: kong
proxy:
http:
- nodePort: 31080
+ nodePort: 32080
tls:
- nodePort: 31443
+ nodePort: 32443
# These port numbers MUST matche with what's in
# ric-common/Common-Template/helm/ric-common/templates/_ingresscontroller.tpl file.
# If need to change a proxy port here, do not forget to update the
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
--- /dev/null
+apiVersion: v1
+appVersion: "1.2"
+description: The Cloud-Native Ingress and Service Mesh for APIs and Microservices
+engine: gotpl
+home: https://KongHQ.com/
+icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png
+maintainers:
+- email: shashi@konghq.com
+ name: shashiranjan84
+name: kong
+sources:
+- https://github.com/Kong/kong
+version: 0.12.2
--- /dev/null
+## Kong
+
+[Kong](https://KongHQ.com/) is an open-source API Gateway and Microservices
+Management Layer, delivering high performance and reliability.
+
+## TL;DR;
+
+```bash
+$ helm install stable/kong
+```
+
+## Introduction
+
+This chart bootstraps all the components needed to run Kong on a [Kubernetes](http://kubernetes.io)
+cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.8+ with Beta APIs enabled.
+- PV provisioner support in the underlying infrastructure if persistence
+ is needed for Kong datastore.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/kong
+```
+
+If using Kong Enterprise, several additional steps are necessary before
+installing the chart. At minimum, you must:
+* Create a [license secret](#license).
+* Set `enterprise.enabled: true` in values.yaml.
+* Update values.yaml to use a Kong Enterprise image. If needed, follow the
+instructions in values.yaml to add a registry pull secret.
+
+Reading through [the full list of Enterprise considerations](#kong-enterprise-specific-parameters)
+is recommended.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
+
+## Configuration
+
+### General Configuration Parameters
+
+The following table lists the configurable parameters of the Kong chart
+and their default values.
+
+| Parameter | Description | Default |
+| ------------------------------ | -------------------------------------------------------------------------------- | ------------------- |
+| image.repository | Kong image | `kong` |
+| image.tag | Kong image version | `1.2` |
+| image.pullPolicy | Image pull policy | `IfNotPresent` |
+| image.pullSecrets | Image pull secrets | `null` |
+| replicaCount | Kong instance count | `1` |
+| admin.useTLS | Secure Admin traffic | `true` |
+| admin.servicePort | TCP port on which the Kong admin service is exposed | `8444` |
+| admin.containerPort | TCP port on which Kong app listens for admin traffic | `8444` |
+| admin.nodePort | Node port when service type is `NodePort` | |
+| admin.hostPort | Host port to use for admin traffic | |
+| admin.type | k8s service type, Options: NodePort, ClusterIP, LoadBalancer | `NodePort` |
+| admin.loadBalancerIP | Will reuse an existing ingress static IP for the admin service | `null` |
+| admin.loadBalancerSourceRanges | Limit admin access to CIDRs if set and service type is `LoadBalancer` | `[]` |
+| admin.ingress.enabled | Enable ingress resource creation (works with proxy.type=ClusterIP) | `false` |
+| admin.ingress.tls | Name of secret resource, containing TLS secret | |
+| admin.ingress.hosts | List of ingress hosts. | `[]` |
+| admin.ingress.path | Ingress path. | `/` |
+| admin.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
+| proxy.http.enabled | Enables http on the proxy | true |
+| proxy.http.servicePort | Service port to use for http | 80 |
+| proxy.http.containerPort | Container port to use for http | 8000 |
+| proxy.http.nodePort | Node port to use for http | 32080 |
+| proxy.http.hostPort | Host port to use for http | |
+| proxy.tls.enabled | Enables TLS on the proxy | true |
+| proxy.tls.containerPort | Container port to use for TLS | 8443 |
+| proxy.tls.servicePort | Service port to use for TLS | 8443 |
+| proxy.tls.nodePort | Node port to use for TLS | 32443 |
+| proxy.tls.hostPort | Host port to use for TLS | |
+| proxy.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | `NodePort` |
+| proxy.loadBalancerSourceRanges | Limit proxy access to CIDRs if set and service type is `LoadBalancer` | `[]` |
+| proxy.loadBalancerIP | To reuse an existing ingress static IP for the admin service | |
+| proxy.externalIPs | IPs for which nodes in the cluster will also accept traffic for the proxy | `[]` |
+| proxy.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
+| proxy.ingress.enabled | Enable ingress resource creation (works with proxy.type=ClusterIP) | `false` |
+| proxy.ingress.tls | Name of secret resource, containing TLS secret | |
+| proxy.ingress.hosts | List of ingress hosts. | `[]` |
+| proxy.ingress.path | Ingress path. | `/` |
+| proxy.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
+| env | Additional [Kong configurations](https://getkong.org/docs/latest/configuration/) | |
+| runMigrations | Run Kong migrations job | `true` |
+| readinessProbe | Kong readiness probe | |
+| livenessProbe | Kong liveness probe | |
+| affinity | Node/pod affinities | |
+| nodeSelector | Node labels for pod assignment | `{}` |
+| podAnnotations | Annotations to add to each pod | `{}` |
+| resources | Pod resource requests & limits | `{}` |
+| tolerations | List of node taints to tolerate | `[]` |
+
+### Admin/Proxy listener override
+
+If you specify `env.admin_listen` or `env.proxy_listen`, this chart will use
+the value provided by you as opposed to constructing a listen variable
+from fields like `proxy.http.containerPort` and `proxy.http.enabled`. This allows
+you to be more prescriptive when defining listen directives.
+
+**Note:** Overriding `env.proxy_listen` and `env.admin_listen` will potentially cause
+`admin.containerPort`, `proxy.http.containerPort` and `proxy.tls.containerPort` to become out of sync,
+and therefore must be updated accordingly.
+
+I.e. updating to `env.proxy_listen: 0.0.0.0:4444, 0.0.0.0:4443 ssl` will need
+`proxy.http.containerPort: 4444` and `proxy.tls.containerPort: 4443` to be set in order
+for the service definition to work properly.
+
+### Kong-specific parameters
+
+Kong has a choice of either Postgres or Cassandra as a backend datastore.
+This chart allows you to choose either of them with the `env.database`
+parameter. Postgres is chosen by default.
+
+Additionally, this chart allows you to use your own database or spin up a new
+instance by using the `postgres.enabled` or `cassandra.enabled` parameters.
+Enabling both will create both databases in your cluster, but only one
+will be used by Kong based on the `env.database` parameter.
+Postgres is enabled by default.
+
+| Parameter | Description | Default |
+| ------------------------------ | -------------------------------------------------------------------- | ------------------- |
+| cassandra.enabled | Spin up a new cassandra cluster for Kong | `false` |
+| postgresql.enabled | Spin up a new postgres instance for Kong | `true` |
+| waitImage.repository | Image used to wait for database to become ready | `busybox` |
+| waitImage.tag | Tag for image used to wait for database to become ready | `latest` |
+| env.database | Choose either `postgres` or `cassandra` | `postgres` |
+| env.pg_user | Postgres username | `kong` |
+| env.pg_database | Postgres database name | `kong` |
+| env.pg_password | Postgres database password (required if you are using your own database)| `kong` |
+| env.pg_host | Postgres database host (required if you are using your own database) | `` |
+| env.pg_port | Postgres database port | `5432` |
+| env.cassandra_contact_points | Cassandra contact points (required if you are using your own database) | `` |
+| env.cassandra_port | Cassandra query port | `9042` |
+| env.cassandra_keyspace | Cassandra keyspace | `kong` |
+| env.cassandra_repl_factor | Replication factor for the Kong keyspace | `2` |
+
+
+All `kong.env` parameters can also accept a mapping instead of a value to ensure the parameters can be set through configmaps and secrets.
+
+An example :
+
+```yaml
+kong:
+ env:
+ pg_user: kong
+ pg_password:
+ valueFrom:
+ secretKeyRef:
+ key: kong
+ name: postgres
+```
+
+
+For complete list of Kong configurations please check https://getkong.org/docs/latest/configuration/.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install stable/kong --name my-release \
+ --set=image.tag=1.2,env.database=cassandra,cassandra.enabled=true
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters
+can be provided while installing the chart. For example,
+
+```console
+$ helm install stable/kong --name my-release -f values.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+### Kong Enterprise-specific parameters
+
+Kong Enterprise requires some additional configuration not needed when using
+Kong OSS. Some of the more important configuration is grouped in sections
+under the `.enterprise` key in values.yaml, though most enterprise-specific
+configuration can be placed under the `.env` key.
+
+To use Kong Enterprise, change your image to a Kong Enterprise image and set
+`.enterprise.enabled: true` in values.yaml to render Enterprise sections of the
+templates. Review the sections below for other settings you should consider
+configuring before installing the chart.
+
+#### Service location hints
+
+Kong Enterprise adds two GUIs, Kong Manager and the Kong Developer Portal, that
+must know where other Kong services (namely the admin and files APIs) can be
+accessed in order to function properly. Kong's default behavior for attempting
+to locate these absent configuration is unlikely to work in common Kubernetes
+environments. Because of this, you should set each of `admin_gui_url`,
+`admin_api_uri`, `proxy_url`, `portal_api_url`, `portal_gui_host`, and
+`portal_gui_protocol` under the `.env` key in values.yaml to locations where
+each of their respective services can be accessed to ensure that Kong services
+can locate one another and properly set CORS headers. See the [Property Reference documentation](https://docs.konghq.com/enterprise/0.35-x/property-reference/)
+for more details on these settings.
+
+#### License
+
+All Kong Enterprise deployments require a license. If you do not have a copy
+of yours, please contact Kong Support. Once you have it, you will need to
+store it in a Secret. Save your secret in a file named `license` (no extension)
+and then create and inspect your secret:
+
+```
+$ kubectl create secret generic kong-enterprise-license --from-file=./license
+$ kubectl get secret kong-enterprise-license -o yaml
+apiVersion: v1
+data:
+ license: eyJsaWNlbnNlIjp7InNpZ25hdHVyZSI6IkhFWSBJIFNFRSBZT1UgUEVFS0lORyBJTlNJREUgTVkgQkFTRTY0IEVYQU1QTEUiLCJwYXlsb2FkIjp7ImN1c3RvbWVyIjoiV0VMTCBUT08gQkFEIiwibGljZW5zZV9jcmVhdGlvbl9kYXRlIjoiMjAxOC0wNi0wNSIsInByb2R1Y3Rfc3Vic2NyaXB0aW9uIjoiVEhFUkVTIE5PVEhJTkcgSEVSRSIsImFkbWluX3NlYXRzIjoiNSIsInN1cHBvcnRfcGxhbiI6IkZha2UiLCJsaWNlbnNlX2V4cGlyYXRpb25fZGF0ZSI6IjIwMjAtMjAtMjAiLCJsaWNlbnNlX2tleSI6IlRTT0kgWkhJViJ9LCJ2ZXJzaW9uIjoxfX0K
+kind: Secret
+metadata:
+ creationTimestamp: "2019-05-17T21:45:16Z"
+ name: kong-enterprise-license
+ namespace: default
+ resourceVersion: "48695485"
+ selfLink: /api/v1/namespaces/default/secrets/kong-enterprise-license
+ uid: 0f2e8903-78ed-11e9-b1a6-42010a8a02ec
+type: Opaque
+```
+Set the secret name in values.yaml, in the `.enterprise.license_secret` key.
+
+#### RBAC
+
+Note that you can create a default RBAC superuser when initially setting up an
+environment, by setting the `KONG_PASSWORD` environment variable on the initial
+migration Job's Pod. This will create a `kong_admin` admin whose token and
+basic-auth password match the value of `KONG_PASSWORD`.
+
+Using RBAC within Kubernetes environments requires providing Kubernetes an RBAC
+user for its readiness and liveness checks. We recommend creating a user that
+has permission to read `/status` and nothing else. For example, with RBAC still
+disabled:
+
+```
+$ curl -sX POST http://admin.kong.example/rbac/users --data name=statuschecker --data user_token=REPLACE_WITH_SOME_TOKEN
+{"user_token_ident":"45239","user_token":"$2b$09$cL.xbvRQCzE35A0osl8VTej7u0BgJOIgpTVjxpwZ1U8.jNdMwyQRW","id":"fe8824dc-09a7-4b68-b5e6-541e4b9b4ced","name":"statuschecker","enabled":true,"comment":null,"created_at":1558131229}
+
+$ curl -sX POST http://admin.kong.example/rbac/roles --data name=read-status
+{"comment":null,"created_at":1558131353,"id":"e32507a5-e636-40b2-88c0-090042db7d79","name":"read-status","is_default":false}
+
+$ curl -sX POST http://admin.kong.example/rbac/roles/read-status/endpoints --data endpoint="/status" --data actions=read
+{"endpoint":"\/status","created_at":1558131423,"workspace":"default","actions":["read"],"negative":false,"role":{"id":"e32507a5-e636-40b2-88c0-090042db7d79"}}
+
+$ curl -sX POST http://admin.kong.example/rbac/users/statuschecker/roles --data roles=read-status
+{"roles":[{"created_at":1558131353,"id":"e32507a5-e636-40b2-88c0-090042db7d79","name":"read-status"}],"user":{"user_token_ident":"45239","user_token":"$2b$09$cL.xbvRQCzE35A0osl8VTej7u0BgJOIgpTVjxpwZ1U8.jNdMwyQRW","id":"fe8824dc-09a7-4b68-b5e6-541e4b9b4ced","name":"statuschecker","comment":null,"enabled":true,"created_at":1558131229}}
+```
+Probes will then need to include that user's token, e.g. for the readinessProbe:
+
+```
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTP
+ httpHeaders:
+ - name: Kong-Admin-Token
+ value: REPLACE_WITH_SOME_TOKEN
+ ...
+```
+
+Note that RBAC is **NOT** currently enabled on the admin API container for the
+controller Pod when the ingress controller is enabled. This admin API container
+is not exposed outside the Pod, so only the controller can interact with it. We
+intend to add RBAC to this container in the future after updating the controller
+to add support for storing its RBAC token in a Secret, as currently it would
+need to be stored in plaintext. RBAC is still enforced on the admin API of the
+main deployment when using the ingress controller, as that admin API *is*
+accessible outside the Pod.
+
+#### Sessions
+
+Login sessions for Kong Manager and the Developer Portal make use of [the Kong
+Sessions plugin](https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/).
+Their configuration must be stored in Secrets, as it contains an HMAC key.
+If using either RBAC or the Portal, create a Secret with `admin_gui_session_conf`
+and `portal_session_conf` keys.
+
+```
+$ cat admin_gui_session_conf
+{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
+$ cat portal_session_conf
+{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
+$ kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf
+secret/kong-session-config created
+```
+The exact plugin settings may vary in your environment. The `secret` should
+always be changed for both configurations.
+
+After creating your secret, set its name in values.yaml, in the
+`.enterprise.portal.session_conf_secret` and
+`.enterprise.rbac.session_conf_secret` keys.
+
+#### Email/SMTP
+
+Email is used to send invitations for [Kong Admins](https://docs.konghq.com/enterprise/enterprise/0.35-x/kong-manager/networking/email/)
+and [Developers](https://docs.konghq.com/enterprise/enterprise/0.35-x/developer-portal/configuration/smtp/).
+
+Email invitations rely on setting a number of SMTP settings at once. For
+convenience, these are grouped under the `.enterprise.smtp` key in values.yaml.
+Setting `.enterprise.smtp.enabled: false` will set `KONG_SMTP_MOCK=on` and
+allow Admin/Developer invites to proceed without sending email. Note, however,
+that these have limited functionality without sending email.
+
+If your SMTP server requires authentication, you should set the `smtp_username` and
+`smtp_password_secret` keys under `.enterprise.smtp.auth`.
+`smtp_password_secret` must be a Secret containing an `smtp_password` key whose
+value is your SMTP password.
+
+### Kong Ingress Controller
+
+Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+created in your Kubernetes cluster.
+It uses CRDs for more fine grained control over routing and
+for Kong specific configuration.
+To deploy the ingress controller together with
+kong run the following command:
+
+```bash
+# without a database
+helm install stable/kong --set ingressController.enabled=true \
+ --set postgresql.enabled=false --set env.database=off
+# with a database
+helm install stable/kong --set ingressController.enabled=true
+```
+
+If you like to use a static IP:
+
+```shell
+helm install stable/kong --set ingressController.enabled=true --set proxy.loadBalancerIP=[Your IP goes there] --set proxy.type=LoadBalancer --name kong --namespace kong
+```
+
+**Note**: Kong Ingress controller doesn't support custom SSL certificates
+on Admin port. We will be removing this limitation in the future.
+
+Kong ingress controller relies on several Custom Resource Definition objects to
+declare the Kong configurations and synchronize the configuration with the
+Kong admin API. Each of these new objects declared in Kubernetes has a
+one-to-one relation with a Kong resource.
+The custom resources are:
+
+- KongConsumer
+- KongCredential
+- KongPlugin
+- KongIngress
+
+You can learn about Kong ingress custom resource definitions [here](https://github.com/Kong/kubernetes-ingress-controller/blob/master/docs/custom-resources.md).
+
+| Parameter | Description | Default |
+| --------------- | ----------------------------------------- | ---------------------------------------------------------------------------- |
+| enabled | Deploy the ingress controller, rbac and crd | false |
+| replicaCount | Number of desired ingress controllers | 1 |
+| image.repository | Docker image with the ingress controller | kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller |
+| image.tag | Version of the ingress controller | 0.4.0 |
+| readinessProbe | Kong ingress controllers readiness probe | |
+| livenessProbe | Kong ingress controllers liveness probe | |
+| ingressClass | The ingress-class value for controller | kong |
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
--- /dev/null
+appVersion: 3.11.3
+description: Apache Cassandra is a free and open-source distributed database management
+ system designed to handle large amounts of data across many commodity servers, providing
+ high availability with no single point of failure.
+engine: gotpl
+home: http://cassandra.apache.org
+icon: https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Cassandra_logo.svg/330px-Cassandra_logo.svg.png
+keywords:
+- cassandra
+- database
+- nosql
+maintainers:
+- email: goonohc@gmail.com
+ name: KongZ
+name: cassandra
+version: 0.10.5
--- /dev/null
+# Cassandra
+A Cassandra Chart for Kubernetes
+
+## Install Chart
+To install the Cassandra Chart into your Kubernetes cluster (this Chart requires a persistent volume by default, so you may need to create a storage class before installing the chart; to create a storage class, see the [Persist data](#persist-data) section)
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" incubator/cassandra
+```
+
+After installation succeeds, you can get a status of Chart
+
+```bash
+helm status "cassandra"
+```
+
+If you want to delete your Chart, use this command
+```bash
+helm delete --purge "cassandra"
+```
+
+## Persist data
+You need to create `StorageClass` before able to persist data in persistent volume.
+To create a `StorageClass` on Google Cloud, run the following
+
+```bash
+kubectl create -f sample/create-storage-gce.yaml
+```
+
+And set the following values in `values.yaml`
+
+```yaml
+persistence:
+ enabled: true
+```
+
+If you want to create a `StorageClass` on other platform, please see documentation here [https://kubernetes.io/docs/user-guide/persistent-volumes/](https://kubernetes.io/docs/user-guide/persistent-volumes/)
+
+When running a cluster without persistence, the termination of a pod will first initiate a decommissioning of that pod.
+Depending on the amount of data stored inside the cluster this may take a while. In order to complete a graceful
+termination, pods need to get more time for it. Set the following values in `values.yaml`:
+
+```yaml
+podSettings:
+ terminationGracePeriodSeconds: 1800
+```
+
+## Install Chart with specific cluster size
+By default, this Chart will create a cassandra with 3 nodes. If you want to change the cluster size during installation, you can use `--set config.cluster_size={value}` argument. Or edit `values.yaml`
+
+For example:
+Set cluster size to 5
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" --set config.cluster_size=5 incubator/cassandra/
+```
+
+## Install Chart with specific resource size
+By default, this Chart will create a cassandra with CPU 2 vCPU and 4Gi of memory which is suitable for development environment.
+If you want to use this Chart for production, I would recommend updating the CPU to 4 vCPU and the memory to 16Gi. Also increase the size of `max_heap_size` and `heap_new_size`.
+To update the settings, edit `values.yaml`
+
+## Install Chart with specific node
+Sometimes you may need to deploy your cassandra to specific nodes to allocate resources. You can use a node selector by setting `nodes.enabled=true` in `values.yaml`
+For example, you have 6 vms in node pools and you want to deploy cassandra to node which labeled as `cloud.google.com/gke-nodepool: pool-db`
+
+Set the following values in `values.yaml`
+
+```yaml
+nodes:
+ enabled: true
+ selector:
+ nodeSelector:
+ cloud.google.com/gke-nodepool: pool-db
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Cassandra chart and their default values.
+
+| Parameter | Description | Default |
+| ----------------------- | --------------------------------------------- | ---------------------------------------------------------- |
+| `image.repo` | `cassandra` image repository | `cassandra` |
+| `image.tag` | `cassandra` image tag | `3.11.3` |
+| `image.pullPolicy` | Image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` |
+| `image.pullSecrets` | Image pull secrets | `nil` |
+| `config.cluster_domain` | The name of the cluster domain. | `cluster.local` |
+| `config.cluster_name` | The name of the cluster. | `cassandra` |
+| `config.cluster_size` | The number of nodes in the cluster. | `3` |
+| `config.seed_size` | The number of seed nodes used to bootstrap new clients joining the cluster. | `2` |
+| `config.seeds` | The comma-separated list of seed nodes. | Automatically generated according to `.Release.Name` and `config.seed_size` |
+| `config.num_tokens` | Initdb Arguments | `256` |
+| `config.dc_name` | Initdb Arguments | `DC1` |
+| `config.rack_name` | Initdb Arguments | `RAC1` |
+| `config.endpoint_snitch` | Initdb Arguments | `SimpleSnitch` |
+| `config.max_heap_size` | Initdb Arguments | `2048M` |
+| `config.heap_new_size` | Initdb Arguments | `512M` |
+| `config.ports.cql` | Initdb Arguments | `9042` |
+| `config.ports.thrift` | Initdb Arguments | `9160` |
+| `config.ports.agent` | The port of the JVM Agent (if any) | `nil` |
+| `config.start_rpc` | Initdb Arguments | `false` |
+| `configOverrides` | Overrides config files in /etc/cassandra dir | `{}` |
+| `commandOverrides` | Overrides default docker command | `[]` |
+| `argsOverrides` | Overrides default docker args | `[]` |
+| `env` | Custom env variables | `{}` |
+| `persistence.enabled` | Use a PVC to persist data | `true` |
+| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
+| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` |
+| `persistence.size` | Size of data volume | `10Gi` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `4Gi`, CPU: `2` |
+| `service.type` | k8s service type exposing ports, e.g. `NodePort`| `ClusterIP` |
+| `podManagementPolicy` | podManagementPolicy of the StatefulSet | `OrderedReady` |
+| `podDisruptionBudget` | Pod distruption budget | `{}` |
+| `podAnnotations` | pod annotations for the StatefulSet | `{}` |
+| `updateStrategy.type` | UpdateStrategy of the StatefulSet | `OnDelete` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `90` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `90` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `rbac.create` | Specifies whether RBAC resources should be created | `true` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to use | |
+| `backup.enabled` | Enable backup on chart installation | `false` |
+| `backup.schedule` | Keyspaces to backup, each with cron time | |
+| `backup.annotations` | Backup pod annotations | iam.amazonaws.com/role: `cain` |
+| `backup.image.repos` | Backup image repository | `nuvo/cain` |
+| `backup.image.tag` | Backup image tag | `0.4.1` |
+| `backup.extraArgs` | Additional arguments for cain | `[]` |
+| `backup.env` | Backup environment variables | AWS_REGION: `us-east-1` |
+| `backup.resources` | Backup CPU/Memory resource requests/limits | Memory: `1Gi`, CPU: `1` |
+| `backup.destination` | Destination to store backup artifacts | `s3://bucket/cassandra` |
+| `exporter.enabled` | Enable Cassandra exporter | `false` |
+| `exporter.image.repo` | Exporter image repository | `criteord/cassandra_exporter` |
+| `exporter.image.tag` | Exporter image tag | `2.0.2` |
+| `exporter.port` | Exporter port | `5556` |
+| `exporter.jvmOpts` | Exporter additional JVM options | |
+| `affinity` | Kubernetes node affinity | `{}` |
+| `tolerations` | Kubernetes node tolerations | `[]` |
+
+
+## Scale cassandra
+When you want to change the cluster size of your cassandra, you can use the helm upgrade command.
+
+```bash
+helm upgrade --set config.cluster_size=5 cassandra incubator/cassandra
+```
+
+## Get cassandra status
+You can get your cassandra cluster status by running the command
+
+```bash
+kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') nodetool status
+```
+
+Output
+```bash
+Datacenter: asia-east1
+======================
+Status=Up/Down
+|/ State=Normal/Leaving/Joining/Moving
+-- Address Load Tokens Owns (effective) Host ID Rack
+UN 10.8.1.11 108.45 KiB 256 66.1% 410cc9da-8993-4dc2-9026-1dd381874c54 a
+UN 10.8.4.12 84.08 KiB 256 68.7% 96e159e1-ef94-406e-a0be-e58fbd32a830 c
+UN 10.8.3.6 103.07 KiB 256 65.2% 1a42b953-8728-4139-b070-b855b8fff326 b
+```
+
+## Benchmark
+You can use [cassandra-stress](https://docs.datastax.com/en/cassandra/3.0/cassandra/tools/toolsCStress.html) tool to run the benchmark on the cluster by the following command
+
+```bash
+kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') cassandra-stress
+```
+
+Example of `cassandra-stress` arguments
+ - Run both read and write with ratio 9:1
+ - Operate on a total of 1 million keys with uniform distribution
+ - Use QUORUM for read/write
+ - Generate 50 threads
+ - Generate result in graph
+ - Use NetworkTopologyStrategy with replication factor 2
+
+```bash
+cassandra-stress mixed ratio\(write=1,read=9\) n=1000000 cl=QUORUM -pop dist=UNIFORM\(1..1000000\) -mode native cql3 -rate threads=50 -log file=~/mixed_autorate_r9w1_1M.log -graph file=test2.html title=test revision=test2 -schema "replication(strategy=NetworkTopologyStrategy, factor=2)"
+```
--- /dev/null
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: generic
+provisioner: kubernetes.io/gce-pd
+parameters:
+ type: pd-ssd
--- /dev/null
+Cassandra CQL can be accessed via port {{ .Values.config.ports.cql }} on the following DNS name from within your cluster:
+Cassandra Thrift can be accessed via port {{ .Values.config.ports.thrift }} on the following DNS name from within your cluster:
+
+If you want to connect to the remote instance with your local Cassandra CQL CLI, forward the API port to localhost:9042 by running the following:
+- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') 9042:{{ .Values.config.ports.cql }}
+
+If you want to connect to the Cassandra CQL run the following:
+{{- if contains "NodePort" .Values.service.type }}
+- export CQL_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "cassandra.fullname" . }})
+- export CQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+- cqlsh $CQL_HOST $CQL_PORT
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "cassandra.fullname" . }}'
+- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+- echo cqlsh $SERVICE_IP
+{{- else if contains "ClusterIP" .Values.service.type }}
+- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "cassandra.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 9042:{{ .Values.config.ports.cql }}
+ echo cqlsh 127.0.0.1 9042
+{{- end }}
+
+You can also see the cluster status by running the following:
+- kubectl exec -it --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{.items[0].metadata.name}') nodetool status
+
+To tail the logs for the Cassandra pod run the following:
+- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }')
+
+{{- if not .Values.persistence.enabled }}
+
+Note that the cluster is running with node-local storage instead of PersistentVolumes. In order to prevent data loss,
+pods will be decommissioned upon termination. Decommissioning may take some time, so you might also want to adjust the
+pod termination grace period, which is currently set to {{ .Values.podSettings.terminationGracePeriodSeconds }} seconds.
+
+{{- end}}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cassandra.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cassandra.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cassandra.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cassandra.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "cassandra.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- if .Values.backup.enabled }}
+{{- $release := .Release }}
+{{- $values := .Values }}
+{{- $backup := $values.backup }}
+{{- range $index, $schedule := $backup.schedule }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: {{ template "cassandra.fullname" $ }}-backup-{{ $schedule.keyspace | replace "_" "-" }}
+ labels:
+ app: {{ template "cassandra.name" $ }}-cain
+ chart: {{ template "cassandra.chart" $ }}
+ release: "{{ $release.Name }}"
+ heritage: "{{ $release.Service }}"
+spec:
+ schedule: {{ $schedule.cron | quote }}
+ concurrencyPolicy: Forbid
+ startingDeadlineSeconds: 120
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+          annotations:
+{{ toYaml $backup.annotations | indent 12 }}
+ spec:
+ restartPolicy: OnFailure
+ serviceAccountName: {{ template "cassandra.serviceAccountName" $ }}
+ containers:
+ - name: cassandra-backup
+ image: "{{ $backup.image.repos }}:{{ $backup.image.tag }}"
+ command: ["cain"]
+ args:
+ - backup
+ - --namespace
+ - {{ $release.Namespace }}
+ - --selector
+ - release={{ $release.Name }},app={{ template "cassandra.name" $ }}
+ - --keyspace
+ - {{ $schedule.keyspace }}
+ - --dst
+ - {{ $backup.destination }}
+ {{- with $backup.extraArgs }}
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ {{- with $backup.env }}
+ env:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ {{- with $backup.resources }}
+ resources:
+{{ toYaml . | indent 14 }}
+ {{- end }}
+ affinity:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - {{ template "cassandra.fullname" $ }}
+ - key: release
+ operator: In
+ values:
+ - {{ $release.Name }}
+ topologyKey: "kubernetes.io/hostname"
+ {{- with $values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 10 }}
+ {{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- if .Values.backup.enabled }}
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "cassandra.serviceAccountName" . }}
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+---
+{{- end }}
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "cassandra.fullname" . }}-backup
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/log"]
+ verbs: ["get", "list"]
+- apiGroups: [""]
+ resources: ["pods/exec"]
+ verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ template "cassandra.fullname" . }}-backup
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "cassandra.fullname" . }}-backup
+subjects:
+- kind: ServiceAccount
+ name: {{ template "cassandra.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- if .Values.configOverrides }}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ template "cassandra.name" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+{{ toYaml .Values.configOverrides | indent 2 }}
+{{- end }}
--- /dev/null
+{{- if .Values.podDisruptionBudget -}}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ name: {{ template "cassandra.fullname" . }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "cassandra.name" . }}
+ release: {{ .Release.Name }}
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
+{{- end -}}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "cassandra.fullname" . }}
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ clusterIP: None
+ type: {{ .Values.service.type }}
+ ports:
+ - name: intra
+ port: 7000
+ targetPort: 7000
+ - name: tls
+ port: 7001
+ targetPort: 7001
+ - name: jmx
+ port: 7199
+ targetPort: 7199
+ - name: cql
+ port: {{ default 9042 .Values.config.ports.cql }}
+ targetPort: {{ default 9042 .Values.config.ports.cql }}
+ - name: thrift
+ port: {{ default 9160 .Values.config.ports.thrift }}
+ targetPort: {{ default 9160 .Values.config.ports.thrift }}
+ {{- if .Values.config.ports.agent }}
+ - name: agent
+ port: {{ .Values.config.ports.agent }}
+ targetPort: {{ .Values.config.ports.agent }}
+ {{- end }}
+ selector:
+ app: {{ template "cassandra.name" . }}
+ release: {{ .Release.Name }}
--- /dev/null
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ template "cassandra.fullname" . }}
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "cassandra.name" . }}
+ release: {{ .Release.Name }}
+ serviceName: {{ template "cassandra.fullname" . }}
+ replicas: {{ .Values.config.cluster_size }}
+ podManagementPolicy: {{ .Values.podManagementPolicy }}
+ updateStrategy:
+ type: {{ .Values.updateStrategy.type }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "cassandra.name" . }}
+ release: {{ .Release.Name }}
+{{- if .Values.podLabels }}
+{{ toYaml .Values.podLabels | indent 8 }}
+{{- end }}
+{{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+ spec:
+ hostNetwork: {{ .Values.hostNetwork }}
+{{- if .Values.selector }}
+{{ toYaml .Values.selector | indent 6 }}
+{{- end }}
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup }}
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+{{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+ containers:
+{{- if .Values.exporter.enabled }}
+ - name: cassandra-exporter
+ image: "{{ .Values.exporter.image.repo }}:{{ .Values.exporter.image.tag }}"
+ env:
+ - name: CASSANDRA_EXPORTER_CONFIG_listenPort
+ value: {{ .Values.exporter.port | quote }}
+ - name: JVM_OPTS
+ value: {{ .Values.exporter.jvmOpts | quote }}
+ ports:
+ - name: metrics
+ containerPort: {{ .Values.exporter.port }}
+ protocol: TCP
+ - name: jmx
+ containerPort: 5555
+ livenessProbe:
+ tcpSocket:
+ port: {{ .Values.exporter.port }}
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: {{ .Values.exporter.port }}
+ initialDelaySeconds: 20
+ timeoutSeconds: 45
+{{- end }}
+ - name: {{ template "cassandra.fullname" . }}
+ image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+{{- if .Values.commandOverrides }}
+ command: {{ .Values.commandOverrides }}
+{{- end }}
+{{- if .Values.argsOverrides }}
+ args: {{ .Values.argsOverrides }}
+{{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ {{- $seed_size := default 1 .Values.config.seed_size | int -}}
+ {{- $global := . }}
+ - name: CASSANDRA_SEEDS
+ {{- if .Values.hostNetwork }}
+ value: {{ required "You must fill \".Values.config.seeds\" with list of Cassandra seeds when hostNetwork is set to true" .Values.config.seeds | quote }}
+ {{- else }}
+ value: "{{- range $i, $e := until $seed_size }}{{ template "cassandra.fullname" $global }}-{{ $i }}.{{ template "cassandra.fullname" $global }}.{{ $global.Release.Namespace }}.svc.{{ $global.Values.config.cluster_domain }}{{- if (lt ( add1 $i ) $seed_size ) }},{{- end }}{{- end }}"
+ {{- end }}
+ - name: MAX_HEAP_SIZE
+ value: {{ default "8192M" .Values.config.max_heap_size | quote }}
+ - name: HEAP_NEWSIZE
+ value: {{ default "200M" .Values.config.heap_new_size | quote }}
+ - name: CASSANDRA_ENDPOINT_SNITCH
+ value: {{ default "SimpleSnitch" .Values.config.endpoint_snitch | quote }}
+ - name: CASSANDRA_CLUSTER_NAME
+ value: {{ default "Cassandra" .Values.config.cluster_name | quote }}
+ - name: CASSANDRA_DC
+ value: {{ default "DC1" .Values.config.dc_name | quote }}
+ - name: CASSANDRA_RACK
+ value: {{ default "RAC1" .Values.config.rack_name | quote }}
+ - name: CASSANDRA_START_RPC
+ value: {{ default "false" .Values.config.start_rpc | quote }}
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ {{- range $key, $value := .Values.env }}
+ - name: {{ $key | quote }}
+ value: {{ $value | quote }}
+ {{- end }}
+ livenessProbe:
+ exec:
+ command: [ "/bin/sh", "-c", "nodetool status" ]
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ readinessProbe:
+ exec:
+ command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+${POD_IP}\"" ]
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ ports:
+ - name: intra
+ containerPort: 7000
+ - name: tls
+ containerPort: 7001
+ - name: jmx
+ containerPort: 7199
+ - name: cql
+ containerPort: {{ default 9042 .Values.config.ports.cql }}
+ - name: thrift
+ containerPort: {{ default 9160 .Values.config.ports.thrift }}
+ {{- if .Values.config.ports.agent }}
+ - name: agent
+ containerPort: {{ .Values.config.ports.agent }}
+ {{- end }}
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/cassandra
+{{- range $key, $value := .Values.configOverrides }}
+ - name: cassandra-config-{{ $key | replace "." "-" }}
+ mountPath: /etc/cassandra/{{ $key }}
+ subPath: {{ $key }}
+{{- end }}
+ {{- if not .Values.persistence.enabled }}
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/sh", "-c", "exec nodetool decommission"]
+ {{- end }}
+ terminationGracePeriodSeconds: {{ default 30 .Values.podSettings.terminationGracePeriodSeconds }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.image.pullSecrets }}
+ {{- end }}
+{{- if or .Values.configOverrides (not .Values.persistence.enabled) }}
+ volumes:
+{{- end }}
+{{- range $key, $value := .Values.configOverrides }}
+ - configMap:
+          name: {{ template "cassandra.name" $ }}
+ name: cassandra-config-{{ $key | replace "." "-" }}
+{{- end }}
+{{- if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+{{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: {{ template "cassandra.name" . }}
+ chart: {{ template "cassandra.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
--- /dev/null
+## Cassandra image version
+## ref: https://hub.docker.com/r/library/cassandra/
+image:
+ repo: cassandra
+ tag: 3.11.3
+ pullPolicy: IfNotPresent
+ ## Specify ImagePullSecrets for Pods
+ ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ # pullSecrets: myregistrykey
+
+## Specify a service type
+## ref: http://kubernetes.io/docs/user-guide/services/
+service:
+ type: ClusterIP
+
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+ ## cassandra data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteOnce
+ size: 10Gi
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Minimum memory for development is 4GB and 2 CPU cores
+## Minimum memory for production is 8GB and 4 CPU cores
+## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html
+resources: {}
+ # requests:
+ # memory: 4Gi
+ # cpu: 2
+ # limits:
+ # memory: 4Gi
+ # cpu: 2
+
+## Change cassandra configuration parameters below:
+## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html
+## Recommended max heap size is 1/2 of system memory
+## Recommended heap new size is 1/4 of max heap size
+## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/operations/opsTuneJVM.html
+config:
+ cluster_domain: cluster.local
+ cluster_name: cassandra
+ cluster_size: 3
+ seed_size: 2
+ num_tokens: 256
+ # If you want Cassandra to use this datacenter and rack name,
+ # you need to set endpoint_snitch to GossipingPropertyFileSnitch.
+ # Otherwise, these values are ignored and datacenter1 and rack1
+ # are used.
+ dc_name: DC1
+ rack_name: RAC1
+ endpoint_snitch: SimpleSnitch
+ max_heap_size: 2048M
+ heap_new_size: 512M
+ start_rpc: false
+ ports:
+ cql: 9042
+ thrift: 9160
+ # If a JVM Agent is in place
+ # agent: 61621
+
+## Cassandra config files overrides
+configOverrides: {}
+
+## Cassandra docker command overrides
+commandOverrides: []
+
+## Cassandra docker args overrides
+argsOverrides: []
+
+## Custom env variables.
+## ref: https://hub.docker.com/_/cassandra/
+env: {}
+
+## Liveness and Readiness probe values.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+livenessProbe:
+ initialDelaySeconds: 90
+ periodSeconds: 30
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+readinessProbe:
+ initialDelaySeconds: 90
+ periodSeconds: 30
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+
+## Configure node selector. Edit code below for adding selector to pods
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+# selector:
+ # nodeSelector:
+ # cloud.google.com/gke-nodepool: pool-db
+
+## Additional pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+
+## Additional pod labels
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+## Additional pod-level settings
+podSettings:
+ # Change this to give pods more time to properly leave the cluster when not using persistent storage.
+ terminationGracePeriodSeconds: 30
+
+## Pod disruption budget
+podDisruptionBudget: {}
+ # maxUnavailable: 1
+ # minAvailable: 2
+
+podManagementPolicy: OrderedReady
+updateStrategy:
+ type: OnDelete
+
+## Pod Security Context
+securityContext:
+ enabled: false
+ fsGroup: 999
+ runAsUser: 999
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Node tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name:
+
+# Use host network for Cassandra pods
+# You must pass seed list into config.seeds property if set to true
+hostNetwork: false
+
+## Backup cronjob configuration
+## Ref: https://github.com/nuvo/cain
+backup:
+ enabled: false
+
+ # Schedule to run jobs. Must be in cron time format
+ # Ref: https://crontab.guru/
+ schedule:
+ - keyspace: keyspace1
+ cron: "0 7 * * *"
+ - keyspace: keyspace2
+ cron: "30 7 * * *"
+
+ annotations:
+ # Example for authorization to AWS S3 using kube2iam
+ # Can also be done using environment variables
+ iam.amazonaws.com/role: cain
+
+ image:
+ repos: nuvo/cain
+ tag: 0.4.1
+
+ # Additional arguments for cain
+ # Ref: https://github.com/nuvo/cain#usage
+ extraArgs: []
+
+ # Add additional environment variables
+ env:
+ # Example environment variable required for AWS credentials chain
+ - name: AWS_REGION
+ value: us-east-1
+
+ resources:
+ requests:
+ memory: 1Gi
+ cpu: 1
+ limits:
+ memory: 1Gi
+ cpu: 1
+
+ # Destination to store the backup artifacts
+ # Supported cloud storage services: AWS S3, Minio S3, Azure Blob Storage
+  # Additional support can be added. Visit this repository for details
+ # Ref: https://github.com/nuvo/skbn
+ destination: s3://bucket/cassandra
+
+## Cassandra exporter configuration
+## ref: https://github.com/criteo/cassandra_exporter
+exporter:
+ enabled: false
+ image:
+ repo: criteord/cassandra_exporter
+ tag: 2.0.2
+ port: 5556
+ jvmOpts: ""
--- /dev/null
+.git
+OWNERS
\ No newline at end of file
--- /dev/null
+appVersion: 10.6.0
+description: Chart for PostgreSQL, an object-relational database management system
+ (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+engine: gotpl
+home: https://www.postgresql.org/
+icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png
+keywords:
+- postgresql
+- postgres
+- database
+- sql
+- replication
+- cluster
+maintainers:
+- email: containers@bitnami.com
+ name: Bitnami
+- email: cedric@desaintmartin.fr
+ name: desaintmartin
+name: postgresql
+sources:
+- https://github.com/bitnami/bitnami-docker-postgresql
+version: 3.9.5
--- /dev/null
+# PostgreSQL
+
+[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance.
+
+## TL;DR;
+
+```console
+$ helm install stable/postgresql
+```
+
+## Introduction
+
+This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.10+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/postgresql
+```
+
+The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following tables lists the configurable parameters of the PostgreSQL chart and their default values.
+
+| Parameter | Description | Default |
+|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------|
+| `global.imageRegistry` | Global Docker Image registry | `nil` |
+| `image.registry` | PostgreSQL Image registry | `docker.io` |
+| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL Image tag | `{VERSION}` |
+| `image.pullPolicy` | PostgreSQL Image pull policy | `Always` |
+| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `latest` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` |
+| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` |
+| `replication.enabled` | Would you like to enable replication | `false` |
+| `replication.user` | Replication user | `repl_user` |
+| `replication.password` | Replication user password | `repl_password` |
+| `replication.slaveReplicas` | Number of slaves replicas | `1` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `existingSecret` | Name of existing secret to use for PostgreSQL passwords | `nil` |
+| `postgresqlUsername` | PostgreSQL admin user | `postgres` |
+| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ |
+| `postgresqlDatabase` | PostgreSQL database | `nil` |
+| `postgresqlConfiguration` | Runtime Config Parameters | `nil` |
+| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` |
+| `pgHbaConfiguration` | Content of pg\_hba.conf | `nil (do not create pg_hba.conf)` |
+| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`) | `nil` |
+| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files | `nil` |
+| `initdbScripts` | List of initdb scripts | `nil` |
+| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.port` | PostgreSQL port | `5432` |
+| `service.nodePort` | Kubernetes Service nodePort | `nil` |
+| `service.annotations` | Annotations for PostgreSQL service | {} |
+| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
+| `persistence.enabled` | Enable persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` |
+| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` |
+| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` |
+| `persistence.accessMode` | PVC Access Mode for PostgreSQL volume | `ReadWriteOnce` |
+| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` |
+| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` |
+| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` |
+| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` |
+| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` |
+| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` |
+| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` |
+| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the container | `1001` |
+| `securityContext.runAsUser` | User ID for the container | `1001` |
+| `livenessProbe.enabled` | Whether to enable the liveness probe | `true` |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `readinessProbe.enabled` | Whether to enable the readiness probe | `true` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.clusterIP` | Static clusterIP or None for headless services | `nil` |
+| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{}` |
+| `metrics.service.loadBalancerIP` | loadBalancerIP if the PostgreSQL metrics service type is `LoadBalancer` | `nil` |
+| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Image name | `wrouesnel/postgres_exporter` |
+| `metrics.image.tag` | PostgreSQL Image tag | `{VERSION}` |
+| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `extraEnv` | Any extra environment variables you would like to pass on to the pod | `{}` |
+| `updateStrategy` | Update strategy policy | `{type: "onDelete"}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install --name my-release \
+ --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+ stable/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install --name my-release -f values.yaml stable/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports to customize the whole configuration file.
+
+Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server.
+
+Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
+
+### Allow settings to be loaded from files other than the default `postgresql.conf`
+
+If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
+Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
+
+Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
+
+## Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+## Production and horizontal scaling
+
+The following repo contains the recommended production settings for PostgreSQL server in an alternative [values file](values-production.yaml). Please read carefully the comments in the values-production.yaml file to set up your environment
+
+To horizontally scale this chart, first download the [values-production.yaml](values-production.yaml) file to your local folder, then:
+
+```console
+$ helm install --name my-release -f ./values-production.yaml stable/postgresql
+$ kubectl scale statefulset my-postgresql-slave --replicas=3
+```
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Configuration](#configuration) section to configure the PVC or to disable persistence.
+
+## Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Upgrade
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride.
+
+#### Breaking changes
+
+- `affinty` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running
+
+ ```console
+$ kubectl get svc
+ ```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install --name my-release stable/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this password is the previous chart password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below.
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
--- /dev/null
+Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map.
--- /dev/null
+If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files.
+These files will be injected as config maps and will add to/override the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`.
+
+More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).
--- /dev/null
+You can copy here your custom `.sh`, `.sql` or `.sql.gz` files so that they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file
--- /dev/null
+{{- /* NOTES.txt: post-install connection instructions. Fixed: `contains` takes (substring, string) — the original inverted the arguments, so e.g. service.type "Balancer" also matched; now consistent with the NodePort/LoadBalancer/ClusterIP checks below. */}}
+{{- if contains "LoadBalancer" .Values.service.type }}
+{{- if not .Values.postgresqlPassword }}
+-------------------------------------------------------------------------------
+ WARNING
+
+ By specifying "serviceType=LoadBalancer" and not specifying "postgresqlPassword"
+ you have most likely exposed the PostgreSQL service externally without any
+ authentication mechanism.
+
+ For security reasons, we strongly suggest that you switch to "ClusterIP" or
+ "NodePort". As an alternative, you can also specify a valid password on the
+ "postgresqlPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+PostgreSQL can be accessed via port 5432 on the following DNS name from within your cluster:
+
+ {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection
+{{- if .Values.replication.enabled }}
+ {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection
+{{- end }}
+To get the password for "{{ .Values.postgresqlUsername }}" run:
+
+ export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "postgresql.fullname" . }}{{ end }} -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+
+To connect to your database run the following command:
+
+ kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image bitnami/postgresql --env="PGPASSWORD=$POSTGRESQL_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+ --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }}
+
+{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+Note: Since NetworkPolicy is enabled, only pods with label "{{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster.
+{{- end }}
+
+To connect to your database from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.service.type }}
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }})
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }}
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host $SERVICE_IP --port {{ .Values.service.port }} -U {{ .Values.postgresqlUsername }}
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} 5432:5432 &
+ {{ if .Values.postgresqlPassword }}PGPASSWORD="{{ .Values.postgresqlPassword}}" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }}
+
+{{- end }}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "postgresql.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+The fullnameOverride value is used verbatim: it must not be passed through printf as the
+format string, or any "%" character in the override would corrupt the rendered name.
+*/}}
+{{- define "postgresql.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified name for the master StatefulSet: "<fullname>-master" when
+replication is enabled, the plain fullname otherwise. Honors fullnameOverride.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgresql.master.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}}
+{{- if .Values.replication.enabled -}}
+{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy, based on the target cluster version:
+extensions/v1beta1 for k8s 1.4-1.6, networking.k8s.io/v1 for 1.7+.
+*/}}
+{{- define "postgresql.networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+"extensions/v1beta1"
+{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+"networking.k8s.io/v1"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label ("+" is not a valid label char).
+*/}}
+{{- define "postgresql.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper PostgreSQL image name, allowing a global.imageRegistry to take
+precedence over image.registry.
+*/}}
+{{- define "postgresql.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+ {{- if .Values.global.imageRegistry }}
+ {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+ {{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+ {{- end -}}
+{{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper image name to change the volume permissions (init container),
+with the same global.imageRegistry precedence as postgresql.image.
+*/}}
+{{- define "postgresql.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+ {{- if .Values.global.imageRegistry }}
+ {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+ {{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+ {{- end -}}
+{{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Return the proper PostgreSQL metrics exporter image name (no global registry override here,
+registry and tag fall back to docker.io / latest).
+*/}}
+{{- define "metrics.image" -}}
+{{- $registryName := default "docker.io" .Values.metrics.image.registry -}}
+{{- $tag := default "latest" .Values.metrics.image.tag | toString -}}
+{{- printf "%s/%s:%s" $registryName .Values.metrics.image.repository $tag -}}
+{{- end -}}
+
+{{/*
+Get the password secret name: the user-supplied existingSecret, or the chart-managed one.
+*/}}
+{{- define "postgresql.secretName" -}}
+{{- if .Values.existingSecret -}}
+{{- printf "%s" .Values.existingSecret -}}
+{{- else -}}
+{{- printf "%s" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration ConfigMap name: external configurationConfigMap wins over the
+chart-generated "<fullname>-configuration".
+*/}}
+{{- define "postgresql.configurationCM" -}}
+{{- if .Values.configurationConfigMap -}}
+{{- printf "%s" .Values.configurationConfigMap -}}
+{{- else -}}
+{{- printf "%s-configuration" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extended configuration ConfigMap name: external extendedConfConfigMap wins over
+the chart-generated "<fullname>-extended-configuration".
+*/}}
+{{- define "postgresql.extendedConfigurationCM" -}}
+{{- if .Values.extendedConfConfigMap -}}
+{{- printf "%s" .Values.extendedConfConfigMap -}}
+{{- else -}}
+{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts ConfigMap name: external initdbScriptsConfigMap wins over
+the chart-generated "<fullname>-init-scripts".
+*/}}
+{{- define "postgresql.initdbScriptsCM" -}}
+{{- if .Values.initdbScriptsConfigMap -}}
+{{- printf "%s" .Values.initdbScriptsConfigMap -}}
+{{- else -}}
+{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{/* ConfigMap carrying postgresql.conf / pg_hba.conf, built from chart files or values; not rendered when an external configurationConfigMap is supplied. */ -}}
+{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-configuration
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+{{- /* A checked-in files/postgresql.conf takes precedence over .Values.postgresqlConfiguration (camelCase keys are converted to snake_case settings). */}}
+{{- if (.Files.Glob "files/postgresql.conf") }}
+{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }}
+{{- else if .Values.postgresqlConfiguration }}
+ postgresql.conf: |
+{{- range $key, $value := default dict .Values.postgresqlConfiguration }}
+ {{ $key | snakecase }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- /* Same precedence for pg_hba.conf: file first, then .Values.pgHbaConfiguration verbatim. */}}
+{{- if (.Files.Glob "files/pg_hba.conf") }}
+{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }}
+{{- else if .Values.pgHbaConfiguration }}
+ pg_hba.conf: |
+{{ .Values.pgHbaConfiguration | indent 4 }}
+{{- end }}
+{{ end }}
--- /dev/null
+{{- /* ConfigMap with extra .conf snippets (loaded via include_dir); skipped when an external extendedConfConfigMap is supplied. */}}
+{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-extended-configuration
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+{{- /* All files/conf.d/*.conf snippets are included, then .Values.postgresqlExtendedConf is rendered as override.conf (camelCase keys become snake_case settings). */}}
+{{- with .Files.Glob "files/conf.d/*.conf" }}
+{{ .AsConfig | indent 2 }}
+{{- end }}
+{{ with .Values.postgresqlExtendedConf }}
+ override.conf: |
+{{- range $key, $value := . }}
+ {{ $key | snakecase }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- /* ConfigMap with initdb scripts (.sh/.sql as plain data, .sql.gz base64-encoded in binaryData); skipped when an external initdbScriptsConfigMap is supplied. */}}
+{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "postgresql.fullname" . }}-init-scripts
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- /* Gzipped SQL must go through binaryData since ConfigMap data is UTF-8 only. */}}
+{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }}
+binaryData:
+{{- range $path, $bytes := . }}
+ {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }}
+{{- end }}
+{{- end }}
+data:
+{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }}
+{{ .AsConfig | indent 2 }}
+{{- end }}
+{{- with .Values.initdbScripts }}
+{{ toYaml . | indent 2 }}
+{{- end }}
+{{- end }}
--- /dev/null
+{{- /* Service exposing the postgres_exporter metrics port (9187); rendered only when metrics.enabled. */}}
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-metrics
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ annotations:
+{{ toYaml .Values.metrics.service.annotations | indent 4 }}
+spec:
+ type: {{ .Values.metrics.service.type }}
+ {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: metrics
+ port: 9187
+ targetPort: metrics
+{{- /* Only the master pod is scraped (role: master selector). */}}
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name }}
+ role: master
+{{- end }}
--- /dev/null
+{{- /* NetworkPolicy restricting ingress to the PostgreSQL pods; rendered only when networkPolicy.enabled. */}}
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }}
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ podSelector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ ingress:
+ # Allow inbound connections on 5432; when allowExternal is false, only from pods
+ # carrying the "<fullname>-client=true" label.
+ - ports:
+ - port: 5432
+ {{- if not .Values.networkPolicy.allowExternal }}
+ from:
+ - podSelector:
+ matchLabels:
+ {{ template "postgresql.fullname" . }}-client: "true"
+ {{- end }}
+ # Allow prometheus scrapes
+ # NOTE(review): this rule has no "from" clause, so port 9187 is reachable from any pod — confirm intended.
+ - ports:
+ - port: 9187
+{{- end }}
--- /dev/null
+{{- /* Secret holding the postgres (and optional replication) password; skipped entirely when an existingSecret is supplied. */}}
+{{- if not .Values.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+type: Opaque
+data:
+{{- /* NOTE(review): randAlphaNum is re-evaluated on every render, so an unset postgresqlPassword yields a NEW password on each helm upgrade — confirm this is acceptable. */}}
+ {{- if .Values.postgresqlPassword }}
+ postgresql-password: {{ .Values.postgresqlPassword | b64enc | quote }}
+ {{- else }}
+ postgresql-password: {{ randAlphaNum 10 | b64enc | quote }}
+ {{- end }}
+ {{- if .Values.replication.enabled }}
+ {{- if .Values.replication.password }}
+ postgresql-replication-password: {{ .Values.replication.password | b64enc | quote }}
+ {{- else }}
+ postgresql-replication-password: {{ randAlphaNum 10 | b64enc | quote }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
--- /dev/null
+{{- /* StatefulSet for the read-only slave replicas; rendered only when replication.enabled. Slaves stream from POSTGRESQL_MASTER_HOST (the chart fullname service). */}}
+{{- if .Values.replication.enabled }}
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+ name: "{{ template "postgresql.fullname" . }}-slave"
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: {{ template "postgresql.fullname" . }}-headless
+ replicas: {{ .Values.replication.slaveReplicas }}
+ selector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: slave
+ template:
+ metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ role: slave
+ spec:
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup }}
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ {{- if .Values.slave.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.slave.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.slave.affinity }}
+ affinity:
+{{ toYaml .Values.slave.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.slave.tolerations }}
+ tolerations:
+{{ toYaml .Values.slave.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+{{- /* Init container fixes data-volume ownership so the non-root postgres user can write to it. */}}
+ {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+ initContainers:
+ - name: init-chmod-data
+ image: {{ template "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ command:
+ - sh
+ - -c
+ - |
+ chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /bitnami
+ if [ -d /bitnami/postgresql/data ]; then
+ chmod 0700 /bitnami/postgresql/data;
+ fi
+ securityContext:
+ runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/postgresql
+ {{- end }}
+ containers:
+ - name: {{ template "postgresql.fullname" . }}
+ image: {{ template "postgresql.image" . }}
+ imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ {{- if .Values.image.debug}}
+ - name: BASH_DEBUG
+ value: "1"
+ - name: NAMI_DEBUG
+ value: "1"
+ {{- end }}
+ - name: POSTGRESQL_REPLICATION_MODE
+ value: "slave"
+ - name: POSTGRESQL_REPLICATION_USER
+ value: {{ .Values.replication.user | quote }}
+{{- /* Replication password comes either from a mounted secret file or a secretKeyRef env var. */}}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+ {{- else }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-replication-password
+ {{- end }}
+ - name: POSTGRESQL_CLUSTER_APP_NAME
+ value: {{ .Values.replication.applicationName }}
+ - name: POSTGRESQL_MASTER_HOST
+ value: {{ template "postgresql.fullname" . }}
+ - name: POSTGRESQL_MASTER_PORT_NUMBER
+ value: {{ .Values.service.port | quote }}
+ ports:
+ - name: postgresql
+ containerPort: {{ .Values.service.port }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets
+ {{ end }}
+ {{- if .Values.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ {{ end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ mountPath: /bitnami/postgresql/conf/conf.d/
+ {{- end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
+ - name: postgresql-config
+ mountPath: /bitnami/postgresql/conf
+ {{- end }}
+ volumes:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ template "postgresql.secretName" . }}
+ {{ end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
+ - name: postgresql-config
+ configMap:
+ name: {{ template "postgresql.configurationCM" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ template "postgresql.extendedConfigurationCM" . }}
+ {{- end }}
+ {{- if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- end }}
+ updateStrategy:
+ type: {{ .Values.updateStrategy.type }}
+{{- if .Values.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- with .Values.persistence.annotations }}
+ annotations:
+ {{- range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{- end }}
+ {{- end }}
+ spec:
+ accessModes:
+{{- /* NOTE(review): iterates persistence.accessModes (plural list), but the README documents `persistence.accessMode` (singular) — confirm the values.yaml key. */}}
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
--- /dev/null
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+ name: {{ template "postgresql.master.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: {{ template "postgresql.fullname" . }}-headless
+ replicas: 1
+ updateStrategy:
+ type: {{ .Values.updateStrategy.type }}
+ selector:
+ matchLabels:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: master
+ template:
+ metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ role: master
+ spec:
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup }}
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+ {{- if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- range .Values.metrics.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ {{- if .Values.master.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.master.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.master.affinity }}
+ affinity:
+{{ toYaml .Values.master.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.master.tolerations }}
+ tolerations:
+{{ toYaml .Values.master.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.terminationGracePeriodSeconds }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ {{- end }}
+ {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+ initContainers:
+ - name: init-chmod-data
+ image: {{ template "postgresql.volumePermissions.image" . }}
+ imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ command:
+ - sh
+ - -c
+ - |
+ chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /bitnami
+ if [ -d /bitnami/postgresql/data ]; then
+ chmod 0700 /bitnami/postgresql/data;
+ fi
+ securityContext:
+ runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/postgresql
+ {{- end }}
+ containers:
+ - name: {{ template "postgresql.fullname" . }}
+ image: {{ template "postgresql.image" . }}
+ imagePullPolicy: "{{ .Values.image.pullPolicy }}"
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ {{- if .Values.image.debug}}
+ - name: BASH_DEBUG
+ value: "1"
+ - name: NAMI_DEBUG
+ value: "1"
+ {{- end }}
+ {{- if .Values.replication.enabled }}
+ - name: POSTGRESQL_REPLICATION_MODE
+ value: "master"
+ - name: POSTGRESQL_REPLICATION_USER
+ value: {{ .Values.replication.user | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password"
+ {{- else }}
+ - name: POSTGRESQL_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-replication-password
+ {{- end }}
+ {{- if not (eq .Values.replication.synchronousCommit "off")}}
+ - name: POSTGRESQL_SYNCHRONOUS_COMMIT_MODE
+ value: {{ .Values.replication.synchronousCommit | quote }}
+ - name: POSTGRESQL_NUM_SYNCHRONOUS_REPLICAS
+ value: {{ .Values.replication.numSynchronousReplicas | quote }}
+ {{- end }}
+ - name: POSTGRESQL_CLUSTER_APP_NAME
+          value: {{ .Values.replication.applicationName | quote }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ value: {{ .Values.postgresqlUsername | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: POSTGRESQL_PASSWORD_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-password"
+ {{- else }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.postgresqlDatabase }}
+ - name: POSTGRESQL_DATABASE
+ value: {{ .Values.postgresqlDatabase | quote }}
+ {{- end }}
+{{- if .Values.extraEnv }}
+{{ toYaml .Values.extraEnv | indent 8 }}
+{{- end }}
+ ports:
+ - name: postgresql
+ containerPort: {{ .Values.service.port }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ {{- if .Values.postgresqlDatabase }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -d {{ .Values.postgresqlDatabase | quote }} -h localhost
+ {{- else }}
+ - exec pg_isready -U {{ .Values.postgresqlUsername | quote }} -h localhost
+ {{- end }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ mountPath: /bitnami/postgresql/conf/conf.d/
+ {{- end }}
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}
+ - name: postgresql-config
+ mountPath: /bitnami/postgresql/conf
+ {{- end }}
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ env:
+ {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase)" .Values.postgresqlDatabase }}
+ - name: DATA_SOURCE_URI
+ value: {{ printf "localhost:%d/%s?sslmode=disable" (int .Values.service.port) $database | quote }}
+ {{- if .Values.usePasswordFile }}
+ - name: DATA_SOURCE_PASS_FILE
+ value: "/opt/bitnami/postgresql/secrets/postgresql-password"
+ {{- else }}
+ - name: DATA_SOURCE_PASS
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "postgresql.secretName" . }}
+ key: postgresql-password
+ {{- end }}
+ - name: DATA_SOURCE_USER
+          value: {{ .Values.postgresqlUsername | quote }}
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /
+ port: metrics
+ initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /
+ port: metrics
+ initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ mountPath: /opt/bitnami/postgresql/secrets/
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9187
+ resources:
+{{ toYaml .Values.metrics.resources | indent 10 }}
+{{- end }}
+ volumes:
+ {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}
+ - name: postgresql-config
+ configMap:
+ name: {{ template "postgresql.configurationCM" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}
+ - name: postgresql-extended-config
+ configMap:
+ name: {{ template "postgresql.extendedConfigurationCM" . }}
+ {{- end }}
+ {{- if .Values.usePasswordFile }}
+ - name: postgresql-password
+ secret:
+ secretName: {{ template "postgresql.secretName" . }}
+ {{- end }}
+ {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ template "postgresql.initdbScriptsCM" . }}
+ {{- end }}
+{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim }}
+{{- else if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ {{- with .Values.persistence.annotations }}
+ annotations:
+ {{- range $key, $value := . }}
+ {{ $key }}: {{ $value }}
+ {{- end }}
+ {{- end }}
+ spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+{{- end }}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-headless
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: postgresql
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
--- /dev/null
+{{- if .Values.replication.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}-read
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ ports:
+ - name: postgresql
+ port: {{ .Values.service.port }}
+ targetPort: postgresql
+ {{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: slave
+{{- end }}
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "postgresql.fullname" . }}
+ labels:
+ app: {{ template "postgresql.name" . }}
+ chart: {{ template "postgresql.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}
+ ports:
+ - name: postgresql
+ port: {{ .Values.service.port }}
+ targetPort: postgresql
+ {{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ selector:
+ app: {{ template "postgresql.name" . }}
+ release: {{ .Release.Name | quote }}
+ role: master
--- /dev/null
+## Global Docker image registry
+### Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
+###
+## global:
+## imageRegistry:
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 10.6.0
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+ ## Set to true if you would like to see extra information on logs
+  ## It turns on BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ debug: false
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: latest
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+ ## Init container Security Context
+ securityContext:
+ runAsUser: 0
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+ enabled: true
+ fsGroup: 1001
+ runAsUser: 1001
+
+replication:
+ enabled: true
+ user: repl_user
+ password: repl_password
+ slaveReplicas: 2
+ ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ synchronousCommit: "on"
+ ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+ ## NOTE: It cannot be > slaveReplicas
+ numSynchronousReplicas: 1
+ ## Replication Cluster application name. Useful for defining multiple replication policies
+ applicationName: my_application
+
+## PostgreSQL admin user
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+postgresqlUsername: postgres
+
+## PostgreSQL password
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+##
+# postgresqlPassword:
+
+## Create a database
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+##
+# postgresqlDatabase:
+
+## PostgreSQL password using existing secret
+## existingSecret: secret
+
+## Mount PostgreSQL secret as a file instead of passing environment variable
+# usePasswordFile: false
+
+## PostgreSQL configuration
+## Specify runtime configuration parameters as a dict, using camelCase, e.g.
+## {"sharedBuffers": "500MB"}
+## Alternatively, you can put your postgresql.conf under the files/ directory
+## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+##
+# postgresqlConfiguration:
+
+## PostgreSQL extended configuration
+## As above, but _appended_ to the main configuration
+## Alternatively, you can put your *.conf under the files/conf.d/ directory
+## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+##
+# postgresqlExtendedConf:
+
+## PostgreSQL client authentication configuration
+## Specify content for pg_hba.conf
+## Default: do not create pg_hba.conf
+## Alternatively, you can put your pg_hba.conf under the files/ directory
+# pgHbaConfiguration: |-
+# local all all trust
+# host all all localhost trust
+# host mydatabase mysuser 192.168.0.0/24 md5
+
+## ConfigMap with PostgreSQL configuration
+## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
+# configurationConfigMap:
+
+## ConfigMap with PostgreSQL extended configuration
+# extendedConfConfigMap:
+
+## initdb scripts
+## Specify dictionary of scripts to be run at first boot
+## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
+##
+# initdbScripts:
+#   my_init_script.sh: |
+# #!/bin/sh
+# echo "Do something."
+
+## ConfigMap with scripts to be run at first boot
+## NOTE: This will override initdbScripts
+# initdbScriptsConfigMap:
+
+## PostgreSQL service configuration
+service:
+  ## PostgreSQL service type
+ type: ClusterIP
+ port: 5432
+
+ ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ # nodePort:
+
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set cloud-provider-specific load balancer settings, for example.
+ annotations: {}
+ ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ # loadBalancerIP:
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+ enabled: true
+ ## A manually managed Persistent Volume and Claim
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ mountPath: /bitnami/postgresql
+ # storageClass: "-"
+ accessModes:
+ - ReadWriteOnce
+ size: 8Gi
+ annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+ type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 256Mi
+ cpu: 250m
+
+networkPolicy:
+ ## Enable creation of NetworkPolicy resources.
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port PostgreSQL is listening
+ ## on. When true, PostgreSQL will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+ enabled: true
+ # resources: {}
+ service:
+ type: ClusterIP
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9187"
+ loadBalancerIP:
+ image:
+ registry: docker.io
+ repository: wrouesnel/postgres_exporter
+ tag: v0.4.6
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+ ## Configure extra options for liveness and readiness probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+# Define custom environment variables to pass to the image here
+extraEnv: {}
--- /dev/null
+## Global Docker image registry
+### Please, note that this will override the image registry for all the images, including dependencies, configured to use the global value
+###
+## global:
+## imageRegistry:
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/postgresql
+ tag: 10.6.0
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+ ## Set to true if you would like to see extra information on logs
+  ## It turns on BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ debug: false
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: latest
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: Always
+ ## Init container Security Context
+ securityContext:
+ runAsUser: 0
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+ enabled: true
+ fsGroup: 1001
+ runAsUser: 1001
+
+replication:
+ enabled: false
+ user: repl_user
+ password: repl_password
+ slaveReplicas: 1
+ ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
+ synchronousCommit: "off"
+ ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+ ## NOTE: It cannot be > slaveReplicas
+ numSynchronousReplicas: 0
+ ## Replication Cluster application name. Useful for defining multiple replication policies
+ applicationName: my_application
+
+## PostgreSQL admin user
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+postgresqlUsername: postgres
+
+## PostgreSQL password
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
+##
+# postgresqlPassword:
+
+## PostgreSQL password using existing secret
+## existingSecret: secret
+
+## Mount PostgreSQL secret as a file instead of passing environment variable
+# usePasswordFile: false
+
+## Create a database
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
+##
+# postgresqlDatabase:
+
+## PostgreSQL configuration
+## Specify runtime configuration parameters as a dict, using camelCase, e.g.
+## {"sharedBuffers": "500MB"}
+## Alternatively, you can put your postgresql.conf under the files/ directory
+## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
+##
+# postgresqlConfiguration:
+
+## PostgreSQL extended configuration
+## As above, but _appended_ to the main configuration
+## Alternatively, you can put your *.conf under the files/conf.d/ directory
+## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
+##
+# postgresqlExtendedConf:
+
+## PostgreSQL client authentication configuration
+## Specify content for pg_hba.conf
+## Default: do not create pg_hba.conf
+## Alternatively, you can put your pg_hba.conf under the files/ directory
+# pgHbaConfiguration: |-
+# local all all trust
+# host all all localhost trust
+# host mydatabase mysuser 192.168.0.0/24 md5
+
+## ConfigMap with PostgreSQL configuration
+## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
+# configurationConfigMap:
+
+## ConfigMap with PostgreSQL extended configuration
+# extendedConfConfigMap:
+
+## initdb scripts
+## Specify dictionary of scripts to be run at first boot
+## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
+##
+# initdbScripts:
+#   my_init_script.sh: |
+# #!/bin/sh
+# echo "Do something."
+#
+## ConfigMap with scripts to be run at first boot
+## NOTE: This will override initdbScripts
+# initdbScriptsConfigMap:
+
+## Optional duration in seconds the pod needs to terminate gracefully.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+# terminationGracePeriodSeconds: 30
+
+## PostgreSQL service configuration
+service:
+  ## PostgreSQL service type
+ type: ClusterIP
+ # clusterIP: None
+ port: 5432
+
+ ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ # nodePort:
+
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set cloud-provider-specific load balancer settings, for example.
+ annotations: {}
+ ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ # loadBalancerIP:
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+ enabled: true
+ ## A manually managed Persistent Volume and Claim
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ mountPath: /bitnami/postgresql
+ # storageClass: "-"
+ accessModes:
+ - ReadWriteOnce
+ size: 8Gi
+ annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+ type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+ ## Node, affinity and tolerations labels for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 256Mi
+ cpu: 250m
+
+networkPolicy:
+ ## Enable creation of NetworkPolicy resources.
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port PostgreSQL is listening
+ ## on. When true, PostgreSQL will accept connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+ enabled: false
+ # resources: {}
+ service:
+ type: ClusterIP
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9187"
+ loadBalancerIP:
+ image:
+ registry: docker.io
+ repository: wrouesnel/postgres_exporter
+ tag: v0.4.6
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+ ## Configure extra options for liveness and readiness probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+# Define custom environment variables to pass to the image here
+extraEnv: {}
--- /dev/null
+env:
+ database: cassandra
+
+cassandra:
+ enabled: true
+postgresql:
+ enabled: false
--- /dev/null
+# CI test for testing dbless deployment
+ingressController:
+ enabled: true
+env:
+ database: "off"
+postgresql:
+ enabled: false
--- /dev/null
+# Default values for kong.
+# Declare variables to be passed into your templates.
+
+image:
+ repository: kong
+ # repository: kong-docker-kong-enterprise-edition-docker.bintray.io/kong-enterprise-edition
+ tag: 1.2
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## If using the official Kong Enterprise registry above, you MUST provide a secret.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistrKeySecretName
+
+waitImage:
+ repository: busybox
+ tag: latest
+
+# Specify Kong admin and proxy services configurations
+admin:
+ # If you want to specify annotations for the admin service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTPS traffic on the admin port
+ # if set to false also set readinessProbe and livenessProbe httpGet scheme's to 'HTTP'
+ useTLS: true
+ servicePort: 8444
+ containerPort: 8444
+ # Kong admin service type
+ type: NodePort
+ # Set a nodePort which is available
+ # nodePort: 32444
+ # Kong admin ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-admin.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+proxy:
+ # If you want to specify annotations for the proxy service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 80
+ containerPort: 8000
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 443
+ containerPort: 8443
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+manager:
+ # If you want to specify annotations for the Manager service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8002
+ containerPort: 8002
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8445
+ containerPort: 8445
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portal:
+ # If you want to specify annotations for the Portal service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8003
+ containerPort: 8003
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8446
+ containerPort: 8446
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portalapi:
+ # If you want to specify annotations for the Portal API service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8004
+ containerPort: 8004
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8447
+ containerPort: 8447
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Toggle Kong Enterprise features on or off
+# RBAC and SMTP configuration have additional options that must all be set together
+# Other settings should be added to the "env" settings below
+enterprise:
+ enabled: false
+ # Kong Enterprise license secret name
+ # This secret must contain a single 'license' key, containing your base64-encoded license data
+ # The license secret is required for all Kong Enterprise deployments
+ license_secret: you-must-create-a-kong-license-secret
+ # Session configuration secret
+ # The session conf secret is required if using RBAC or the Portal
+ vitals:
+ enabled: true
+ portal:
+ enabled: false
+ # portal_auth here sets the default authentication mechanism for the Portal
+ # FIXME This can be changed per-workspace, but must currently default to
+ # basic-auth to work around limitations with session configuration
+ portal_auth: basic-auth
+ # If the Portal is enabled and any workspace's Portal uses authentication,
+ # this Secret must contain a portal_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-a-portal-session-conf-secret
+ rbac:
+ enabled: false
+ admin_gui_auth: basic-auth
+ # If RBAC is enabled, this Secret must contain an admin_gui_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-an-rbac-session-conf-secret
+ # Set to the appropriate plugin config JSON if not using basic-auth
+ # admin_gui_auth_conf: ''
+ smtp:
+ enabled: false
+ portal_emails_from: none@example.com
+ portal_emails_reply_to: none@example.com
+ admin_emails_from: none@example.com
+ admin_emails_reply_to: none@example.com
+ smtp_admin_emails: none@example.com
+ smtp_host: smtp.example.com
+ smtp_port: 587
+ smtp_starttls: true
+ auth:
+ # If your SMTP server does not require authentication, this section can
+ # be left as-is. If smtp_username is set to anything other than an empty
+ # string, you must create a Secret with an smtp_password key containing
+ # your SMTP password and specify its name here.
+ smtp_username: '' # e.g. postmaster@example.com
+ smtp_password_secret: you-must-create-an-smtp-password
+
+# Set runMigrations to run Kong migrations
+runMigrations: true
+
+# Specify Kong configurations
+# Kong configurations guide https://getkong.org/docs/latest/configuration/
+env:
+ database: postgres
+ proxy_access_log: /dev/stdout
+ admin_access_log: /dev/stdout
+ admin_gui_access_log: /dev/stdout
+ portal_api_access_log: /dev/stdout
+ proxy_error_log: /dev/stderr
+ admin_error_log: /dev/stderr
+ admin_gui_error_log: /dev/stderr
+ portal_api_error_log: /dev/stderr
+
+# If you want to specify resources, uncomment the following
+# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# readinessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+# livenessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+# Affinity for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+# affinity: {}
+
+# Tolerations for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+# Annotation to be added to Kong pods
+podAnnotations: {}
+
+# Kong pod count
+replicaCount: 1
+
+# Kong has a choice of either Postgres or Cassandra as a backend datastore.
+# This chart allows you to choose either of them with the `database.type`
+# parameter. Postgres is chosen by default.
+
+# Additionally, this chart allows you to use your own database or spin up a new
+# instance by using the `postgres.enabled` or `cassandra.enabled` parameters.
+# Enabling both will create both databases in your cluster, but only one
+# will be used by Kong based on the `env.database` parameter.
+# Postgres is enabled by default.
+
+# Cassandra chart configs
+cassandra:
+ enabled: false
+
+# PostgreSQL chart configs
+postgresql:
+ enabled: true
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
+
+# Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+# created in k8s. It uses CRDs for more fine grained control over routing and
+# for Kong specific configuration.
+ingressController:
+ enabled: false
+ image:
+ repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+ tag: 0.4.0
+ replicaCount: 1
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+
+ installCRDs: true
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+ ingressClass: kong
--- /dev/null
+# CI test for Ingress controller basic installation
+ingressController:
+ enabled: true
--- /dev/null
+# CI test for LoadBalancer admin/proxy types
+
+admin:
+ useTLS: true
+ type: LoadBalancer
+ loadBalancerSourceRanges:
+ - 192.168.1.1/32
+ - 10.10.10.10/32
+
+proxy:
+ useTLS: true
+ type: LoadBalancer
+ loadBalancerSourceRanges:
+ - 192.168.1.1/32
+ - 10.10.10.10/32
+
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+postgresql:
+ enabled: true
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
--- /dev/null
+dependencies:
+- name: postgresql
+ version: ~3.9.1
+ repository: https://kubernetes-charts.storage.googleapis.com/
+ condition: postgresql.enabled
+- name: cassandra
+ version: ~0.10.5
+ repository: https://kubernetes-charts-incubator.storage.googleapis.com/
+ condition: cassandra.enabled
--- /dev/null
+1. Kong Admin can be accessed inside the cluster using:
+ DNS={{ template "kong.fullname" . }}-admin.{{ .Release.Namespace }}.svc.cluster.local
+ PORT={{ .Values.admin.servicePort }}
+
+To connect from outside the K8s cluster:
+ {{- if contains "LoadBalancer" .Values.admin.type }}
+ HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if contains "NodePort" .Values.admin.type }}
+ HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-admin -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if .Values.admin.ingress.enabled }}
+
+use one of the addresses listed below
+
+ {{- $path := .Values.admin.ingress.path -}}
+ {{- if .Values.admin.ingress.tls }}
+ {{- range .Values.admin.ingress.hosts }}
+ https://{{ . }}{{ $path }}
+ {{- end }}
+ {{- else }}
+ {{- range .Values.admin.ingress.hosts }}
+ http://{{ . }}{{ $path }}
+ {{- end }}
+ {{- end }}
+
+ {{- else if contains "ClusterIP" .Values.admin.type }}
+ HOST=127.0.0.1
+
+ # Execute the following commands to route the connection to Admin SSL port:
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}, app={{ template "kong.name" . }}" -o jsonpath="{.items[0].metadata.name}")
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.admin.servicePort }}:{{ .Values.admin.servicePort }}
+ {{- end }}
+
+
+2. Kong Proxy can be accessed inside the cluster using:
+ DNS={{ template "kong.fullname" . }}-proxy.{{ .Release.Namespace }}.svc.cluster.local
+ {{- if .Values.proxy.tls.enabled }}
+ PORT={{ .Values.proxy.tls.servicePort }}
+ {{- else }}
+ PORT={{ .Values.proxy.http.servicePort }}
+ {{- end }}
+
+
+To connect from outside the K8s cluster:
+ {{- if contains "LoadBalancer" .Values.proxy.type }}
+ HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if contains "NodePort" .Values.proxy.type }}
+ HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
+ PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "kong.fullname" . }}-proxy -o jsonpath='{.spec.ports[0].nodePort}')
+
+ {{- else if .Values.proxy.ingress.enabled }}
+
+use one of the addresses listed below
+
+ {{- $path := .Values.proxy.ingress.path -}}
+ {{- if .Values.proxy.ingress.tls }}
+ {{- range .Values.proxy.ingress.hosts }}
+ https://{{ . }}{{ $path }}
+ {{- end }}
+ {{- else }}
+ {{- range .Values.proxy.ingress.hosts }}
+ http://{{ . }}{{ $path }}
+ {{- end }}
+ {{- end }}
+
+ {{- else if contains "ClusterIP" .Values.proxy.type }}
+ HOST=127.0.0.1
+
+ # Execute the following commands to route the connection to proxy SSL port:
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}, app={{ template "kong.name" . }}" -o jsonpath="{.items[0].metadata.name}")
+ {{- if .Values.proxy.tls.enabled -}}
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.proxy.tls.servicePort }}:{{ .Values.proxy.tls.servicePort }}
+ {{- else -}}
+ kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.proxy.http.servicePort }}:{{ .Values.proxy.http.servicePort }}
+ {{- end -}}
+ {{- end }}
--- /dev/null
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+
+{{- define "kong.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "kong.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "kong.postgresql.fullname" -}}
+{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "kong.cassandra.fullname" -}}
+{{- $name := default "cassandra" .Values.cassandra.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kong.serviceAccountName" -}}
+{{- if .Values.ingressController.serviceAccount.create -}}
+ {{ default (include "kong.fullname" .) .Values.ingressController.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.ingressController.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the KONG_PROXY_LISTEN value string
+*/}}
+{{- define "kong.kongProxyListenValue" -}}
+
+{{- if and .Values.proxy.http.enabled .Values.proxy.tls.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.http.containerPort }},0.0.0.0:{{ .Values.proxy.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.proxy.http.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.http.containerPort }}
+{{- end -}}
+{{- if .Values.proxy.tls.enabled -}}
+ 0.0.0.0:{{ .Values.proxy.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_ADMIN_GUI_LISTEN value string
+*/}}
+{{- define "kong.kongManagerListenValue" -}}
+
+{{- if and .Values.manager.http.enabled .Values.manager.tls.enabled -}}
+ 0.0.0.0:{{ .Values.manager.http.containerPort }},0.0.0.0:{{ .Values.manager.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.manager.http.enabled -}}
+ 0.0.0.0:{{ .Values.manager.http.containerPort }}
+{{- end -}}
+{{- if .Values.manager.tls.enabled -}}
+ 0.0.0.0:{{ .Values.manager.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_PORTAL_GUI_LISTEN value string
+*/}}
+{{- define "kong.kongPortalListenValue" -}}
+
+{{- if and .Values.portal.http.enabled .Values.portal.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portal.http.containerPort }},0.0.0.0:{{ .Values.portal.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.portal.http.enabled -}}
+ 0.0.0.0:{{ .Values.portal.http.containerPort }}
+{{- end -}}
+{{- if .Values.portal.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portal.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the KONG_PORTAL_API_LISTEN value string
+*/}}
+{{- define "kong.kongPortalApiListenValue" -}}
+
+{{- if and .Values.portalapi.http.enabled .Values.portalapi.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.http.containerPort }},0.0.0.0:{{ .Values.portalapi.tls.containerPort }} ssl
+{{- else -}}
+{{- if .Values.portalapi.http.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.http.containerPort }}
+{{- end -}}
+{{- if .Values.portalapi.tls.enabled -}}
+ 0.0.0.0:{{ .Values.portalapi.tls.containerPort }} ssl
+{{- end -}}
+{{- end -}}
+
+{{- end }}
+
+{{/*
+Create the ingress servicePort value string
+*/}}
+
+{{- define "kong.ingress.servicePort" -}}
+{{- if .tls.enabled -}}
+ {{ .tls.servicePort }}
+{{- else -}}
+ {{ .http.servicePort }}
+{{- end -}}
+{{- end -}}
+
+
+{{- define "kong.env" -}}
+{{- range $key, $val := .Values.env }}
+- name: KONG_{{ $key | upper}}
+{{- $valueType := printf "%T" $val -}}
+{{ if eq $valueType "map[string]interface {}" }}
+{{ toYaml $val | indent 2 -}}
+{{- else }}
+ value: {{ $val | quote -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "kong.wait-for-db" -}}
+- name: wait-for-db
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ {{- if .Values.enterprise.enabled }}
+ {{- include "kong.license" . | nindent 2 }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PORT
+ value: "{{ .Values.postgresql.service.port }}"
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ {{- include "kong.env" . | nindent 2 }}
+ command: [ "/bin/sh", "-c", "until kong start; do echo 'waiting for db'; sleep 1; done; kong stop" ]
+{{- end -}}
+
+{{- define "kong.controller-container" -}}
+- name: ingress-controller
+ args:
+ - /kong-ingress-controller
+ # Service from which we extract the IP address(es) to use in Ingress status
+ - --publish-service={{ .Release.Namespace }}/{{ template "kong.fullname" . }}-proxy
+ # Set the ingress class
+ - --ingress-class={{ .Values.ingressController.ingressClass }}
+ - --election-id=kong-ingress-controller-leader-{{ .Values.ingressController.ingressClass }}
+ # the kong URL points to the kong admin api server
+ {{- if .Values.admin.useTLS }}
+ - --kong-url=https://localhost:{{ .Values.admin.containerPort }}
+ - --admin-tls-skip-verify # TODO make this configurable
+ {{- else }}
+ - --kong-url=http://localhost:{{ .Values.admin.containerPort }}
+ {{- end }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: "{{ .Values.ingressController.image.repository }}:{{ .Values.ingressController.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ resources:
+{{ toYaml .Values.ingressController.resources | indent 10 }}
+{{- end -}}
+
+{{/*
+Retrieve Kong Enterprise license from a secret and make it available in env vars
+*/}}
+{{- define "kong.license" -}}
+- name: KONG_LICENSE_DATA
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.enterprise.license_secret }}
+ key: license
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ template "kong.fullname" . }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "extensions"
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - "extensions"
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - "configuration.konghq.com"
+ resources:
+ - kongplugins
+ - kongcredentials
+ - kongconsumers
+ - kongingresses
+ verbs:
+ - get
+ - list
+ - watch
+{{- end -}}
--- /dev/null
+{{- if (and (.Values.ingressController.enabled) (not (eq .Values.env.database "off"))) }}
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: "{{ template "kong.fullname" . }}-controller"
+ labels:
+ app: "{{ template "kong.name" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ component: "controller"
+spec:
+ replicas: {{ .Values.ingressController.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: "controller"
+ template:
+ metadata:
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ labels:
+ app: {{ template "kong.name" . }}
+ release: {{ .Release.Name }}
+ component: "controller"
+ spec:
+ serviceAccountName: {{ template "kong.serviceAccountName" . }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ {{- include "kong.wait-for-db" . | nindent 6 }}
+ containers:
+ - name: admin-api
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KONG_PROXY_LISTEN
+ value: 'off'
+ {{- if .Values.enterprise.enabled }}
+ {{- if .Values.enterprise.rbac.enabled }}
+ # TODO: uncomment this once we have a means of securely providing the
+ # controller its token using a secret.
+ #- name: KONG_ENFORCE_RBAC
+ # value: "on"
+ {{- end }}
+ # the controller admin API should not receive requests to create admins or developers
+ # never enable SMTP on it as such
+ {{- if .Values.enterprise.smtp.enabled }}
+ - name: KONG_SMTP_MOCK
+ value: "on"
+ {{- else }}
+ - name: KONG_SMTP_MOCK
+ value: "on"
+ {{- end }}
+ {{- include "kong.license" . | nindent 8 }}
+ {{- end }}
+ {{- include "kong.env" . | indent 8 }}
+ {{- if .Values.admin.useTLS }}
+ - name: KONG_ADMIN_LISTEN
+ value: "0.0.0.0:{{ .Values.admin.containerPort }} ssl"
+ {{- else }}
+ - name: KONG_ADMIN_LISTEN
+ value: 0.0.0.0:{{ .Values.admin.containerPort }}
+ {{- end }}
+ {{- if .Values.postgresql.enabled }}
+ - name: KONG_PG_HOST
+ value: {{ template "kong.postgresql.fullname" . }}
+ - name: KONG_PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "kong.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- end }}
+ {{- if .Values.cassandra.enabled }}
+ - name: KONG_CASSANDRA_CONTACT_POINTS
+ value: {{ template "kong.cassandra.fullname" . }}
+ {{- end }}
+ ports:
+ - name: admin
+ containerPort: {{ .Values.admin.containerPort }}
+ protocol: TCP
+ readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 10 }}
+ livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ {{- include "kong.controller-container" . | nindent 6 }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "kong.fullname" . }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "kong.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "kong.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: {{ template "kong.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "kong.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "kong.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.rbac.create .Values.ingressController.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: {{ template "kong.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ resourceNames:
+ # Defaults to "<election-id>-<ingress-class>"
+ # Here: "kong-ingress-controller-leader-<ingressClass>-<ingressClass>"
+ # This has to be adapted if you change either parameter
+ # when launching the kong-ingress-controller.
+ - "kong-ingress-controller-leader-{{ .Values.ingressController.ingressClass }}-{{ .Values.ingressController.ingressClass }}"
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "kong.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- end -}}
--- /dev/null
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kongconsumers.configuration.konghq.com
+ labels:
+ app: {{ template "kong.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ group: configuration.konghq.com
+ version: v1
+ scope: Namespaced
+ names:
+ kind: KongConsumer
+ plural: kongconsumers
+ shortNames:
+ - kc
+ additionalPrinterColumns:
+ - name: Username
+ type: string
+ description: Username of a Kong Consumer
+ JSONPath: .username
+ - name: Age
+ type: date
+ description: Age
+ JSONPath: .metadata.creationTimestamp
+ validation:
+ openAPIV3Schema:
+ properties:
+ username:
+ type: string
+ custom_id:
+ type: string
+{{- end -}}
--- /dev/null
+{{- /*
+CustomResourceDefinition for KongCredential (configuration.konghq.com/v1).
+A credential is owned by a KongConsumer via the required consumerRef field.
+*/ -}}
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: kongcredentials.configuration.konghq.com
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  group: configuration.konghq.com
+  version: v1
+  scope: Namespaced
+  names:
+    kind: KongCredential
+    plural: kongcredentials
+  # Extra columns shown by `kubectl get kongcredentials`.
+  additionalPrinterColumns:
+  - name: Credential-type
+    type: string
+    description: Type of credential
+    JSONPath: .type
+  - name: Age
+    type: date
+    description: Age
+    JSONPath: .metadata.creationTimestamp
+  - name: Consumer-Ref
+    type: string
+    description: Owner of the credential
+    JSONPath: .consumerRef
+  validation:
+    openAPIV3Schema:
+      # Both the owning consumer and the credential type are mandatory.
+      required:
+      - consumerRef
+      - type
+      properties:
+        consumerRef:
+          type: string
+        type:
+          type: string
+{{- end -}}
--- /dev/null
+{{- /*
+CustomResourceDefinition for KongIngress (configuration.konghq.com/v1).
+KongIngress resources fine-tune route, proxy (service) and upstream
+behaviour for Ingresses handled by the Kong ingress controller.
+*/ -}}
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: kongingresses.configuration.konghq.com
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  group: configuration.konghq.com
+  version: v1
+  scope: Namespaced
+  names:
+    kind: KongIngress
+    plural: kongingresses
+    shortNames:
+    - ki
+  validation:
+    openAPIV3Schema:
+      properties:
+        # NOTE(review): 'upstream' appears twice in this mapping (here and
+        # below with the full schema); most YAML parsers keep the later
+        # definition — confirm whether this first entry is intentional.
+        upstream:
+          type: object
+        route:
+          properties:
+            methods:
+              type: array
+              items:
+                type: string
+            regex_priority:
+              type: integer
+            strip_path:
+              type: boolean
+            preserve_host:
+              type: boolean
+            protocols:
+              type: array
+              items:
+                type: string
+                enum:
+                - http
+                - https
+        proxy:
+          type: object
+          properties:
+            protocol:
+              type: string
+              enum:
+              - http
+              - https
+            path:
+              type: string
+              pattern: ^/.*$
+            retries:
+              type: integer
+              minimum: 0
+            connect_timeout:
+              type: integer
+              minimum: 0
+            read_timeout:
+              type: integer
+              minimum: 0
+            write_timeout:
+              type: integer
+              minimum: 0
+        upstream:
+          type: object
+          properties:
+            hash_on:
+              type: string
+            hash_on_cookie:
+              type: string
+            hash_on_cookie_path:
+              type: string
+            hash_on_header:
+              type: string
+            hash_fallback_header:
+              type: string
+            hash_fallback:
+              type: string
+            slots:
+              type: integer
+              minimum: 10
+            healthchecks:
+              type: object
+              properties:
+                active:
+                  type: object
+                  properties:
+                    concurrency:
+                      type: integer
+                      minimum: 1
+                    timeout:
+                      type: integer
+                      minimum: 0
+                    http_path:
+                      type: string
+                      pattern: ^/.*$
+                    # YAML anchors: the healthy/unhealthy sub-schemas are
+                    # defined once here and reused below via aliases.
+                    healthy: &healthy
+                      type: object
+                      properties:
+                        http_statuses:
+                          type: array
+                          items:
+                            type: integer
+                        interval:
+                          type: integer
+                          minimum: 0
+                        successes:
+                          type: integer
+                          minimum: 0
+                    unhealthy: &unhealthy
+                      type: object
+                      properties:
+                        http_failures:
+                          type: integer
+                          minimum: 0
+                        http_statuses:
+                          type: array
+                          items:
+                            type: integer
+                        interval:
+                          type: integer
+                          minimum: 0
+                        tcp_failures:
+                          type: integer
+                          minimum: 0
+                        timeout:
+                          type: integer
+                          minimum: 0
+                passive:
+                  type: object
+                  properties:
+                    # Reuse the schemas anchored in the 'active' section.
+                    healthy: *healthy
+                    unhealthy: *unhealthy
+{{- end -}}
--- /dev/null
+{{- /*
+CustomResourceDefinition for KongPlugin (configuration.konghq.com/v1).
+A KongPlugin names a Kong plugin ('plugin', required) plus its free-form
+'config' object and an optional 'disabled' switch.
+*/ -}}
+{{- if and .Values.ingressController.enabled .Values.ingressController.installCRDs -}}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: kongplugins.configuration.konghq.com
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  group: configuration.konghq.com
+  version: v1
+  scope: Namespaced
+  names:
+    kind: KongPlugin
+    plural: kongplugins
+    shortNames:
+    - kp
+  # Extra columns shown by `kubectl get kongplugins`; priority 1 columns
+  # appear only in wide output.
+  additionalPrinterColumns:
+  - name: Plugin-Type
+    type: string
+    description: Name of the plugin
+    JSONPath: .plugin
+  - name: Age
+    type: date
+    description: Age
+    JSONPath: .metadata.creationTimestamp
+  - name: Disabled
+    type: boolean
+    description: Indicates if the plugin is disabled
+    JSONPath: .disabled
+    priority: 1
+  - name: Config
+    type: string
+    description: Configuration of the plugin
+    JSONPath: .config
+    priority: 1
+  validation:
+    openAPIV3Schema:
+      required:
+      - plugin
+      properties:
+        plugin:
+          type: string
+        disabled:
+          type: boolean
+        config:
+          type: object
+{{- end -}}
--- /dev/null
+{{- /*
+Kong Deployment: runs the Kong container (proxy + admin listeners, plus
+the enterprise manager/portal listeners when enabled) and, in DB-less
+mode, the ingress-controller sidecar. With a database configured, an
+init container waits for the database before Kong starts.
+*/ -}}
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: "{{ template "kong.fullname" . }}"
+  labels:
+    app: "{{ template "kong.name" . }}"
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    component: app
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app: {{ template "kong.name" . }}
+      release: {{ .Release.Name }}
+      component: app
+  template:
+    metadata:
+      {{- if .Values.podAnnotations }}
+      annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+      {{- end }}
+      labels:
+        app: {{ template "kong.name" . }}
+        release: {{ .Release.Name }}
+        component: app
+    spec:
+      # Fix: .Values.ingressController is a map and is therefore always
+      # truthy, so the original condition silently reduced to the database
+      # check alone. Test the controller's 'enabled' flag explicitly, as
+      # the CRD and ServiceAccount templates already do.
+      {{- if (and (.Values.ingressController.enabled) (eq .Values.env.database "off")) }}
+      serviceAccountName: {{ template "kong.serviceAccountName" . }}
+      {{- end }}
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+      - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      # With a database backend, block startup until the DB is reachable.
+      {{- if not (eq .Values.env.database "off") }}
+      initContainers:
+      {{- include "kong.wait-for-db" . | nindent 6 }}
+      {{- end }}
+      containers:
+      # DB-less mode: run the ingress controller as a sidecar (same fix as
+      # above — test .enabled, not the map itself).
+      {{- if (and (.Values.ingressController.enabled) (eq .Values.env.database "off")) }}
+      {{- include "kong.controller-container" . | nindent 6 }}
+      {{- end }}
+      - name: {{ template "kong.name" . }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        env:
+        # Listener addresses are derived from chart values unless the user
+        # overrides them directly through .Values.env.*.
+        {{- if not .Values.env.admin_listen }}
+        {{- if .Values.admin.useTLS }}
+        - name: KONG_ADMIN_LISTEN
+          value: "0.0.0.0:{{ .Values.admin.containerPort }} ssl"
+        {{- else }}
+        - name: KONG_ADMIN_LISTEN
+          value: 0.0.0.0:{{ .Values.admin.containerPort }}
+        {{- end }}
+        {{- end }}
+        {{- if not .Values.env.proxy_listen }}
+        - name: KONG_PROXY_LISTEN
+          value: {{ template "kong.kongProxyListenValue" . }}
+        {{- end }}
+        {{- if and (not .Values.env.admin_gui_listen) (.Values.enterprise.enabled) }}
+        - name: KONG_ADMIN_GUI_LISTEN
+          value: {{ template "kong.kongManagerListenValue" . }}
+        {{- end }}
+        {{- if and (not .Values.env.portal_gui_listen) (.Values.enterprise.enabled) (.Values.enterprise.portal.enabled) }}
+        - name: KONG_PORTAL_GUI_LISTEN
+          value: {{ template "kong.kongPortalListenValue" . }}
+        {{- end }}
+        {{- if and (not .Values.env.portal_api_listen) (.Values.enterprise.enabled) (.Values.enterprise.portal.enabled) }}
+        - name: KONG_PORTAL_API_LISTEN
+          value: {{ template "kong.kongPortalApiListenValue" . }}
+        {{- end }}
+        # nginx must stay in the foreground inside the container.
+        - name: KONG_NGINX_DAEMON
+          value: "off"
+        {{- if .Values.enterprise.enabled }}
+        {{- if .Values.enterprise.vitals.enabled }}
+        - name: KONG_VITALS
+          value: "on"
+        {{- end }}
+        {{- if .Values.enterprise.portal.enabled }}
+        - name: KONG_PORTAL
+          value: "on"
+        {{- if .Values.enterprise.portal.portal_auth }}
+        - name: KONG_PORTAL_AUTH
+          value: {{ .Values.enterprise.portal.portal_auth }}
+        - name: KONG_PORTAL_SESSION_CONF
+          valueFrom:
+            secretKeyRef:
+              name: {{ .Values.enterprise.portal.session_conf_secret }}
+              key: portal_session_conf
+        {{- end }}
+        {{- end }}
+        {{- if .Values.enterprise.rbac.enabled }}
+        - name: KONG_ENFORCE_RBAC
+          value: "on"
+        - name: KONG_ADMIN_GUI_AUTH
+          value: {{ .Values.enterprise.rbac.admin_gui_auth | default "basic-auth" }}
+        - name: KONG_ADMIN_GUI_AUTH_CONF
+          value: {{ toJson .Values.enterprise.rbac.admin_gui_auth_conf | default "" }}
+        - name: KONG_ADMIN_GUI_SESSION_CONF
+          valueFrom:
+            secretKeyRef:
+              name: {{ .Values.enterprise.rbac.session_conf_secret }}
+              key: admin_gui_session_conf
+        {{- end }}
+        {{- if .Values.enterprise.smtp.enabled }}
+        - name: KONG_PORTAL_EMAILS_FROM
+          value: {{ .Values.enterprise.smtp.portal_emails_from }}
+        - name: KONG_PORTAL_EMAILS_REPLY_TO
+          value: {{ .Values.enterprise.smtp.portal_emails_reply_to }}
+        - name: KONG_ADMIN_EMAILS_FROM
+          value: {{ .Values.enterprise.smtp.admin_emails_from }}
+        - name: KONG_ADMIN_EMAILS_REPLY_TO
+          value: {{ .Values.enterprise.smtp.admin_emails_reply_to }}
+        - name: KONG_SMTP_HOST
+          value: {{ .Values.enterprise.smtp.smtp_host }}
+        - name: KONG_SMTP_PORT
+          value: {{ .Values.enterprise.smtp.smtp_port }}
+        - name: KONG_SMTP_STARTTLS
+          value: {{ .Values.enterprise.smtp.smtp_starttls }}
+        {{- if .Values.enterprise.smtp.auth.smtp_username }}
+        - name: KONG_SMTP_USERNAME
+          value: {{ .Values.enterprise.smtp.auth.smtp_username }}
+        - name: KONG_SMTP_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ .Values.enterprise.smtp.auth.smtp_password }}
+              key: smtp_password
+        {{- end }}
+        {{- else }}
+        # Without SMTP configured, mock mail delivery so enterprise
+        # features that send mail still function.
+        - name: KONG_SMTP_MOCK
+          value: "on"
+        {{- end }}
+        {{- include "kong.license" . | nindent 8 }}
+        {{- end }}
+        {{- include "kong.env" . | indent 8 }}
+        {{- if .Values.postgresql.enabled }}
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        {{- end }}
+        {{- if .Values.cassandra.enabled }}
+        - name: KONG_CASSANDRA_CONTACT_POINTS
+          value: {{ template "kong.cassandra.fullname" . }}
+        {{- end }}
+        ports:
+        - name: admin
+          containerPort: {{ .Values.admin.containerPort }}
+          {{- if .Values.admin.hostPort }}
+          hostPort: {{ .Values.admin.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- if .Values.proxy.http.enabled }}
+        - name: proxy
+          containerPort: {{ .Values.proxy.http.containerPort }}
+          {{- if .Values.proxy.http.hostPort }}
+          hostPort: {{ .Values.proxy.http.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.proxy.tls.enabled }}
+        - name: proxy-tls
+          containerPort: {{ .Values.proxy.tls.containerPort }}
+          {{- if .Values.proxy.tls.hostPort }}
+          hostPort: {{ .Values.proxy.tls.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        # Enterprise-only listeners: manager (admin GUI), portal, portal API.
+        {{- if .Values.enterprise.enabled }}
+        {{- if .Values.manager.http.enabled }}
+        - name: manager
+          containerPort: {{ .Values.manager.http.containerPort }}
+          {{- if .Values.manager.http.hostPort }}
+          hostPort: {{ .Values.manager.http.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.manager.tls.enabled }}
+        - name: manager-tls
+          containerPort: {{ .Values.manager.tls.containerPort }}
+          {{- if .Values.manager.tls.hostPort }}
+          hostPort: {{ .Values.manager.tls.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.portal.http.enabled }}
+        - name: portal
+          containerPort: {{ .Values.portal.http.containerPort }}
+          {{- if .Values.portal.http.hostPort }}
+          hostPort: {{ .Values.portal.http.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.portal.tls.enabled }}
+        - name: portal-tls
+          containerPort: {{ .Values.portal.tls.containerPort }}
+          {{- if .Values.portal.tls.hostPort }}
+          hostPort: {{ .Values.portal.tls.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.portalapi.http.enabled }}
+        - name: portalapi
+          containerPort: {{ .Values.portalapi.http.containerPort }}
+          {{- if .Values.portalapi.http.hostPort }}
+          hostPort: {{ .Values.portalapi.http.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- if .Values.portalapi.tls.enabled }}
+        - name: portalapi-tls
+          containerPort: {{ .Values.portalapi.tls.containerPort }}
+          {{- if .Values.portalapi.tls.hostPort }}
+          hostPort: {{ .Values.portalapi.tls.hostPort }}
+          {{- end }}
+          protocol: TCP
+        {{- end }}
+        {{- end }}
+        readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 10 }}
+        livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 10 }}
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+      {{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+      {{- end }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+      {{- end }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
--- /dev/null
+{{- /*
+Optional Ingress exposing the Kong admin API service.
+NOTE(review): servicePort comes straight from .Values.admin.servicePort
+here, while the other ingress templates use the kong.ingress.servicePort
+helper — confirm this asymmetry is intended.
+*/ -}}
+{{- if .Values.admin.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := .Values.admin.servicePort -}}
+{{- $path := .Values.admin.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "kong.fullname" . }}-admin
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+    {{- range $key, $value := .Values.admin.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range $host := .Values.admin.ingress.hosts }}
+  - host: {{ $host }}
+    http:
+      paths:
+      - path: {{ $path }}
+        backend:
+          serviceName: {{ $serviceName }}-admin
+          servicePort: {{ $servicePort }}
+  {{- end -}}
+  {{- if .Values.admin.ingress.tls }}
+  tls:
+{{ toYaml .Values.admin.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
\ No newline at end of file
--- /dev/null
+{{- /*
+Optional Ingress exposing the Kong Manager (enterprise admin GUI) service.
+Rendered only when enterprise mode and the manager ingress are enabled.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.manager.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.manager -}}
+{{- $path := .Values.manager.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "kong.fullname" . }}-manager
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+    {{- range $key, $value := .Values.manager.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range $host := .Values.manager.ingress.hosts }}
+  - host: {{ $host }}
+    http:
+      paths:
+      - path: {{ $path }}
+        backend:
+          serviceName: {{ $serviceName }}-manager
+          servicePort: {{ $servicePort }}
+  {{- end -}}
+  {{- if .Values.manager.ingress.tls }}
+  tls:
+{{ toYaml .Values.manager.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Optional Ingress exposing the Kong Portal API (enterprise) service.
+Rendered only when enterprise mode and the portalapi ingress are enabled.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.portalapi.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.portalapi -}}
+{{- $path := .Values.portalapi.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "kong.fullname" . }}-portalapi
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+    {{- range $key, $value := .Values.portalapi.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range $host := .Values.portalapi.ingress.hosts }}
+  - host: {{ $host }}
+    http:
+      paths:
+      - path: {{ $path }}
+        backend:
+          serviceName: {{ $serviceName }}-portalapi
+          servicePort: {{ $servicePort }}
+  {{- end -}}
+  {{- if .Values.portalapi.ingress.tls }}
+  tls:
+{{ toYaml .Values.portalapi.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Optional Ingress exposing the Kong Developer Portal (enterprise) service.
+Rendered only when enterprise mode and the portal ingress are enabled.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+{{- if .Values.portal.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.portal -}}
+{{- $path := .Values.portal.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "kong.fullname" . }}-portal
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+    {{- range $key, $value := .Values.portal.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range $host := .Values.portal.ingress.hosts }}
+  - host: {{ $host }}
+    http:
+      paths:
+      - path: {{ $path }}
+        backend:
+          serviceName: {{ $serviceName }}-portal
+          servicePort: {{ $servicePort }}
+  {{- end -}}
+  {{- if .Values.portal.ingress.tls }}
+  tls:
+{{ toYaml .Values.portal.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+Optional Ingress exposing the Kong proxy service (data-plane traffic).
+*/ -}}
+{{- if .Values.proxy.ingress.enabled -}}
+{{- $serviceName := include "kong.fullname" . -}}
+{{- $servicePort := include "kong.ingress.servicePort" .Values.proxy -}}
+{{- $path := .Values.proxy.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "kong.fullname" . }}-proxy
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+  annotations:
+    {{- range $key, $value := .Values.proxy.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range $host := .Values.proxy.ingress.hosts }}
+  - host: {{ $host }}
+    http:
+      paths:
+      - path: {{ $path }}
+        backend:
+          serviceName: {{ $serviceName }}-proxy
+          servicePort: {{ $servicePort }}
+  {{- end -}}
+  {{- if .Values.proxy.ingress.tls }}
+  tls:
+{{ toYaml .Values.proxy.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
--- /dev/null
+{{- /*
+post-upgrade hook Job: runs `kong migrations finish` to complete pending
+schema migrations after an upgrade. Skipped entirely in DB-less mode.
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+# Why is this Job duplicated and not using only helm hooks?
+# See: https://github.com/helm/charts/pull/7362
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "kong.fullname" . }}-post-upgrade-migrations
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    component: post-upgrade-migrations
+  annotations:
+    helm.sh/hook: "post-upgrade"
+    helm.sh/hook-delete-policy: "before-hook-creation"
+spec:
+  template:
+    metadata:
+      name: {{ template "kong.name" . }}-post-upgrade-migrations
+      labels:
+        app: {{ template "kong.name" . }}
+        release: "{{ .Release.Name }}"
+        component: post-upgrade-migrations
+    spec:
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+      - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      # Wait until PostgreSQL accepts TCP connections before migrating.
+      {{- if .Values.postgresql.enabled }}
+      initContainers:
+      - name: wait-for-postgres
+        image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+        env:
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+      {{- end }}
+      containers:
+      - name: {{ template "kong.name" . }}-post-upgrade-migrations
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        env:
+        - name: KONG_NGINX_DAEMON
+          value: "off"
+        {{- if .Values.enterprise.enabled }}
+        {{- include "kong.license" . | nindent 8 }}
+        {{- end }}
+        {{- include "kong.env" . | indent 8 }}
+        {{- if .Values.postgresql.enabled }}
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        {{- end }}
+        {{- if .Values.cassandra.enabled }}
+        - name: KONG_CASSANDRA_CONTACT_POINTS
+          value: {{ template "kong.cassandra.fullname" . }}
+        {{- end }}
+        command: [ "/bin/sh", "-c", "kong migrations finish" ]
+      restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+pre-upgrade hook Job: runs `kong migrations up` before the new release is
+rolled out. Skipped entirely in DB-less mode.
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+# Why is this Job duplicated and not using only helm hooks?
+# See: https://github.com/helm/charts/pull/7362
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "kong.fullname" . }}-pre-upgrade-migrations
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    component: pre-upgrade-migrations
+  annotations:
+    helm.sh/hook: "pre-upgrade"
+    helm.sh/hook-delete-policy: "before-hook-creation"
+spec:
+  template:
+    metadata:
+      name: {{ template "kong.name" . }}-pre-upgrade-migrations
+      labels:
+        app: {{ template "kong.name" . }}
+        release: "{{ .Release.Name }}"
+        component: pre-upgrade-migrations
+    spec:
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+      - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      # Wait until PostgreSQL accepts TCP connections before migrating.
+      {{- if .Values.postgresql.enabled }}
+      initContainers:
+      - name: wait-for-postgres
+        image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+        env:
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+      {{- end }}
+      containers:
+      # NOTE(review): container is named "-upgrade-migrations" rather than
+      # "-pre-upgrade-migrations" — inconsistent with the Job name but
+      # harmless; confirm before renaming.
+      - name: {{ template "kong.name" . }}-upgrade-migrations
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        env:
+        - name: KONG_NGINX_DAEMON
+          value: "off"
+        {{- if .Values.enterprise.enabled }}
+        {{- include "kong.license" . | nindent 8 }}
+        {{- end }}
+        {{- include "kong.env" . | indent 8 }}
+        {{- if .Values.postgresql.enabled }}
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        {{- end }}
+        {{- if .Values.cassandra.enabled }}
+        - name: KONG_CASSANDRA_CONTACT_POINTS
+          value: {{ template "kong.cassandra.fullname" . }}
+        {{- end }}
+        command: [ "/bin/sh", "-c", "kong migrations up" ]
+      restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+Install-time Job: runs `kong migrations bootstrap` to initialise the
+datastore schema on first install. Skipped entirely in DB-less mode.
+(No helm.sh/hook annotation — runs as a regular release resource.)
+*/ -}}
+{{- if (and (.Values.runMigrations) (not (eq .Values.env.database "off"))) }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "kong.fullname" . }}-init-migrations
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    component: init-migrations
+spec:
+  template:
+    metadata:
+      name: {{ template "kong.name" . }}-init-migrations
+      labels:
+        app: {{ template "kong.name" . }}
+        release: "{{ .Release.Name }}"
+        component: init-migrations
+    spec:
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+      - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      # Wait until PostgreSQL accepts TCP connections before bootstrapping.
+      {{- if .Values.postgresql.enabled }}
+      initContainers:
+      - name: wait-for-postgres
+        image: "{{ .Values.waitImage.repository }}:{{ .Values.waitImage.tag }}"
+        env:
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        command: [ "/bin/sh", "-c", "until nc -zv $KONG_PG_HOST $KONG_PG_PORT -w1; do echo 'waiting for db'; sleep 1; done" ]
+      {{- end }}
+      containers:
+      - name: {{ template "kong.name" . }}-migrations
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        env:
+        - name: KONG_NGINX_DAEMON
+          value: "off"
+        {{- if .Values.enterprise.enabled }}
+        {{- include "kong.license" . | nindent 8 }}
+        {{- end }}
+        {{- include "kong.env" . | indent 8 }}
+        {{- if .Values.postgresql.enabled }}
+        - name: KONG_PG_HOST
+          value: {{ template "kong.postgresql.fullname" . }}
+        - name: KONG_PG_PORT
+          value: "{{ .Values.postgresql.service.port }}"
+        - name: KONG_PG_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ template "kong.postgresql.fullname" . }}
+              key: postgresql-password
+        {{- end }}
+        {{- if .Values.cassandra.enabled }}
+        - name: KONG_CASSANDRA_CONTACT_POINTS
+          value: {{ template "kong.cassandra.fullname" . }}
+        {{- end }}
+        command: [ "/bin/sh", "-c", "kong migrations bootstrap" ]
+      restartPolicy: OnFailure
+{{- end }}
--- /dev/null
+{{- /*
+Service exposing the Kong admin API listener (single port, no TLS/HTTP
+split — admin TLS is controlled by .Values.admin.useTLS on the listener).
+*/ -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-admin
+  annotations:
+    {{- range $key, $value := .Values.admin.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.admin.type }}
+  # LoadBalancer-only tuning knobs.
+  {{- if eq .Values.admin.type "LoadBalancer" }}
+  {{- if .Values.admin.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.admin.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.admin.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.admin.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  ports:
+  - name: kong-admin
+    port: {{ .Values.admin.servicePort }}
+    targetPort: {{ .Values.admin.containerPort }}
+    # A fixed nodePort is honoured only for NodePort services.
+    {{- if (and (eq .Values.admin.type "NodePort") (not (empty .Values.admin.nodePort))) }}
+    nodePort: {{ .Values.admin.nodePort }}
+    {{- end }}
+    protocol: TCP
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
--- /dev/null
+{{- /*
+Service exposing the Kong Manager (enterprise admin GUI) HTTP/TLS ports.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-manager
+  annotations:
+    {{- range $key, $value := .Values.manager.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.manager.type }}
+  {{- if eq .Values.manager.type "LoadBalancer" }}
+  {{- if .Values.manager.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.manager.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.manager.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.manager.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  # Fix: only emit externalIPs when the list is non-empty; the original
+  # unconditionally rendered the key, producing a null externalIPs field.
+  {{- if .Values.manager.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.manager.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.manager.http.enabled }}
+  - name: kong-manager
+    port: {{ .Values.manager.http.servicePort }}
+    targetPort: {{ .Values.manager.http.containerPort }}
+    {{- if (and (eq .Values.manager.type "NodePort") (not (empty .Values.manager.http.nodePort))) }}
+    nodePort: {{ .Values.manager.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- /* Fix: dropped a leftover single-operand 'or' from this condition. */ -}}
+  {{- if .Values.manager.tls.enabled }}
+  - name: kong-manager-tls
+    port: {{ .Values.manager.tls.servicePort }}
+    targetPort: {{ .Values.manager.tls.containerPort }}
+    {{- if (and (eq .Values.manager.type "NodePort") (not (empty .Values.manager.tls.nodePort))) }}
+    nodePort: {{ .Values.manager.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+{{- /*
+Service exposing the Kong Portal API (enterprise) HTTP/TLS ports.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-portalapi
+  annotations:
+    {{- range $key, $value := .Values.portalapi.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.portalapi.type }}
+  {{- if eq .Values.portalapi.type "LoadBalancer" }}
+  {{- if .Values.portalapi.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.portalapi.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.portalapi.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.portalapi.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  # Fix: only emit externalIPs when the list is non-empty; the original
+  # unconditionally rendered the key, producing a null externalIPs field.
+  {{- if .Values.portalapi.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.portalapi.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.portalapi.http.enabled }}
+  - name: kong-portalapi
+    port: {{ .Values.portalapi.http.servicePort }}
+    targetPort: {{ .Values.portalapi.http.containerPort }}
+    {{- if (and (eq .Values.portalapi.type "NodePort") (not (empty .Values.portalapi.http.nodePort))) }}
+    nodePort: {{ .Values.portalapi.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- /* Fix: dropped a leftover single-operand 'or' from this condition. */ -}}
+  {{- if .Values.portalapi.tls.enabled }}
+  - name: kong-portalapi-tls
+    port: {{ .Values.portalapi.tls.servicePort }}
+    targetPort: {{ .Values.portalapi.tls.containerPort }}
+    {{- if (and (eq .Values.portalapi.type "NodePort") (not (empty .Values.portalapi.tls.nodePort))) }}
+    nodePort: {{ .Values.portalapi.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+{{- /*
+Service exposing the Kong Developer Portal (enterprise) HTTP/TLS ports.
+*/ -}}
+{{- if .Values.enterprise.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-portal
+  annotations:
+    {{- range $key, $value := .Values.portal.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.portal.type }}
+  {{- if eq .Values.portal.type "LoadBalancer" }}
+  {{- if .Values.portal.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.portal.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.portal.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.portal.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  # Fix: only emit externalIPs when the list is non-empty; the original
+  # unconditionally rendered the key, producing a null externalIPs field.
+  {{- if .Values.portal.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.portal.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.portal.http.enabled }}
+  - name: kong-portal
+    port: {{ .Values.portal.http.servicePort }}
+    targetPort: {{ .Values.portal.http.containerPort }}
+    {{- if (and (eq .Values.portal.type "NodePort") (not (empty .Values.portal.http.nodePort))) }}
+    nodePort: {{ .Values.portal.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- /* Fix: dropped a leftover single-operand 'or' from this condition. */ -}}
+  {{- if .Values.portal.tls.enabled }}
+  - name: kong-portal-tls
+    port: {{ .Values.portal.tls.servicePort }}
+    targetPort: {{ .Values.portal.tls.containerPort }}
+    {{- if (and (eq .Values.portal.type "NodePort") (not (empty .Values.portal.tls.nodePort))) }}
+    nodePort: {{ .Values.portal.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
+{{- end -}}
--- /dev/null
+{{- /*
+Service exposing the Kong proxy (data-plane) HTTP/TLS ports.
+*/ -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kong.fullname" . }}-proxy
+  annotations:
+    {{- range $key, $value := .Values.proxy.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+  labels:
+    app: {{ template "kong.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+spec:
+  type: {{ .Values.proxy.type }}
+  {{- if eq .Values.proxy.type "LoadBalancer" }}
+  {{- if .Values.proxy.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.proxy.loadBalancerIP }}
+  {{- end }}
+  {{- if .Values.proxy.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $cidr := .Values.proxy.loadBalancerSourceRanges }}
+  - {{ $cidr }}
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  # Fix: only emit externalIPs when the list is non-empty; the original
+  # unconditionally rendered the key, producing a null externalIPs field.
+  {{- if .Values.proxy.externalIPs }}
+  externalIPs:
+  {{- range $ip := .Values.proxy.externalIPs }}
+  - {{ $ip }}
+  {{- end }}
+  {{- end }}
+  ports:
+  {{- if .Values.proxy.http.enabled }}
+  - name: kong-proxy
+    port: {{ .Values.proxy.http.servicePort }}
+    targetPort: {{ .Values.proxy.http.containerPort }}
+    {{- if (and (eq .Values.proxy.type "NodePort") (not (empty .Values.proxy.http.nodePort))) }}
+    nodePort: {{ .Values.proxy.http.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- /* Fix: dropped a leftover single-operand 'or' from this condition. */ -}}
+  {{- if .Values.proxy.tls.enabled }}
+  - name: kong-proxy-tls
+    port: {{ .Values.proxy.tls.servicePort }}
+    targetPort: {{ .Values.proxy.tls.containerPort }}
+    {{- if (and (eq .Values.proxy.type "NodePort") (not (empty .Values.proxy.tls.nodePort))) }}
+    nodePort: {{ .Values.proxy.tls.nodePort }}
+    {{- end }}
+    protocol: TCP
+  {{- end }}
+  {{- if .Values.proxy.externalTrafficPolicy }}
+  externalTrafficPolicy: {{ .Values.proxy.externalTrafficPolicy }}
+  {{- end }}
+  selector:
+    app: {{ template "kong.name" . }}
+    release: {{ .Release.Name }}
+    component: app
--- /dev/null
+# Default values for kong.
+# Declare variables to be passed into your templates.
+
+image:
+ repository: kong
+ # repository: kong-docker-kong-enterprise-edition-docker.bintray.io/kong-enterprise-edition
+ tag: 1.2
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## If using the official Kong Enterprise registry above, you MUST provide a secret.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+waitImage:
+ repository: busybox
+ tag: latest
+
+# Specify Kong admin and proxy services configurations
+admin:
+ # If you want to specify annotations for the admin service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTPS traffic on the admin port
+ # if set to false also set readinessProbe and livenessProbe httpGet scheme's to 'HTTP'
+ useTLS: true
+ servicePort: 8444
+ containerPort: 8444
+ # Kong admin service type
+ type: NodePort
+ # Set a nodePort which is available
+ # nodePort: 32444
+ # Kong admin ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-admin.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+proxy:
+ # If you want to specify annotations for the proxy service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 80
+ containerPort: 8000
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 443
+ containerPort: 8443
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+ # Kong proxy ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+manager:
+ # If you want to specify annotations for the Manager service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8002
+ containerPort: 8002
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8445
+ containerPort: 8445
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+  # Kong Manager ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portal:
+ # If you want to specify annotations for the Portal service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8003
+ containerPort: 8003
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8446
+ containerPort: 8446
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+  # Kong Portal ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+portalapi:
+ # If you want to specify annotations for the Portal API service, uncomment the following
+ # line, add additional or adjust as needed, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+
+ # HTTP plain-text traffic
+ http:
+ enabled: true
+ servicePort: 8004
+ containerPort: 8004
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32080
+
+ tls:
+ enabled: true
+ servicePort: 8447
+ containerPort: 8447
+ # Set a nodePort which is available if service type is NodePort
+ # nodePort: 32443
+
+ type: NodePort
+
+  # Kong Portal API ingress settings.
+ ingress:
+ # Enable/disable exposure using ingress.
+ enabled: false
+ # TLS secret name.
+ # tls: kong-proxy.example.com-tls
+ # Array of ingress hosts.
+ hosts: []
+ # Map of ingress annotations.
+ annotations: {}
+ # Ingress path.
+ path: /
+
+ externalIPs: []
+
+# Toggle Kong Enterprise features on or off
+# RBAC and SMTP configuration have additional options that must all be set together
+# Other settings should be added to the "env" settings below
+enterprise:
+ enabled: false
+ # Kong Enterprise license secret name
+ # This secret must contain a single 'license' key, containing your base64-encoded license data
+ # The license secret is required for all Kong Enterprise deployments
+ license_secret: you-must-create-a-kong-license-secret
+ # Session configuration secret
+ # The session conf secret is required if using RBAC or the Portal
+ vitals:
+ enabled: true
+ portal:
+ enabled: false
+ # portal_auth here sets the default authentication mechanism for the Portal
+ # FIXME This can be changed per-workspace, but must currently default to
+ # basic-auth to work around limitations with session configuration
+ portal_auth: basic-auth
+ # If the Portal is enabled and any workspace's Portal uses authentication,
+    # this Secret must contain a portal_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-a-portal-session-conf-secret
+ rbac:
+ enabled: false
+ admin_gui_auth: basic-auth
+ # If RBAC is enabled, this Secret must contain an admin_gui_session_conf key
+ # The key value must be a secret configuration, following the example at https://docs.konghq.com/enterprise/0.35-x/kong-manager/authentication/sessions/
+ session_conf_secret: you-must-create-an-rbac-session-conf-secret
+ # Set to the appropriate plugin config JSON if not using basic-auth
+ # admin_gui_auth_conf: ''
+ smtp:
+ enabled: false
+ portal_emails_from: none@example.com
+ portal_emails_reply_to: none@example.com
+ admin_emails_from: none@example.com
+ admin_emails_reply_to: none@example.com
+ smtp_admin_emails: none@example.com
+ smtp_host: smtp.example.com
+ smtp_port: 587
+ smtp_starttls: true
+ auth:
+ # If your SMTP server does not require authentication, this section can
+ # be left as-is. If smtp_username is set to anything other than an empty
+ # string, you must create a Secret with an smtp_password key containing
+ # your SMTP password and specify its name here.
+ smtp_username: '' # e.g. postmaster@example.com
+ smtp_password_secret: you-must-create-an-smtp-password
+
+# Set runMigrations to run Kong migrations
+runMigrations: true
+
+# Specify Kong configurations
+# Kong configurations guide https://getkong.org/docs/latest/configuration/
+env:
+ database: postgres
+ proxy_access_log: /dev/stdout
+ admin_access_log: /dev/stdout
+ admin_gui_access_log: /dev/stdout
+ portal_api_access_log: /dev/stdout
+ proxy_error_log: /dev/stderr
+ admin_error_log: /dev/stderr
+ admin_gui_error_log: /dev/stderr
+ portal_api_error_log: /dev/stderr
+
+# If you want to specify resources, uncomment the following
+# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# readinessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+readinessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 5
+
+# livenessProbe for Kong pods
+# If using Kong Enterprise with RBAC, you must add a Kong-Admin-Token header
+livenessProbe:
+ httpGet:
+ path: "/status"
+ port: admin
+ scheme: HTTPS
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+
+# Affinity for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+# affinity: {}
+
+# Tolerations for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+
+# Annotation to be added to Kong pods
+podAnnotations: {}
+
+# Kong pod count
+replicaCount: 1
+
+# Kong has a choice of either Postgres or Cassandra as a backend datastore.
+# This chart allows you to choose either of them with the `database.type`
+# parameter. Postgres is chosen by default.
+
+# Additionally, this chart allows you to use your own database or spin up a new
+# instance by using the `postgres.enabled` or `cassandra.enabled` parameters.
+# Enabling both will create both databases in your cluster, but only one
+# will be used by Kong based on the `env.database` parameter.
+# Postgres is enabled by default.
+
+# Cassandra chart configs
+cassandra:
+ enabled: false
+
+# PostgreSQL chart configs
+postgresql:
+ enabled: true
+ postgresqlUsername: kong
+ postgresqlDatabase: kong
+ service:
+ port: 5432
+
+# Kong Ingress Controller's primary purpose is to satisfy Ingress resources
+# created in k8s. It uses CRDs for more fine grained control over routing and
+# for Kong specific configuration.
+ingressController:
+ enabled: false
+ image:
+ repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+ tag: 0.4.0
+ replicaCount: 1
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: "/healthz"
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+
+ installCRDs: true
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+ ingressClass: kong