From 34324925890960dd146e1b2caa37a5ca086e819f Mon Sep 17 00:00:00 2001 From: Beppe Vanrolleghem Date: Mon, 9 Mar 2020 12:27:10 +0100 Subject: [PATCH] helm consul toegevoegd ma nie als submodule? --- consul-helm/.circleci/config.yml | 51 ++ consul-helm/.helmignore | 4 + consul-helm/CHANGELOG.md | 389 ++++++++ consul-helm/CONTRIBUTING.md | 166 ++++ consul-helm/Chart.yaml | 9 + consul-helm/LICENSE.md | 353 +++++++ consul-helm/Makefile | 6 + consul-helm/README.md | 38 + consul-helm/helm-consul-values.yaml | 25 + consul-helm/templates/NOTES.txt | 20 + consul-helm/templates/_helpers.tpl | 63 ++ consul-helm/templates/client-clusterrole.yaml | 33 + .../templates/client-clusterrolebinding.yaml | 19 + .../templates/client-config-configmap.yaml | 23 + consul-helm/templates/client-daemonset.yaml | 333 +++++++ .../templates/client-podsecuritypolicy.yaml | 66 ++ .../templates/client-serviceaccount.yaml | 12 + .../client-snapshot-agent-clusterrole.yaml | 36 + ...ent-snapshot-agent-clusterrolebinding.yaml | 21 + .../client-snapshot-agent-deployment.yaml | 140 +++ ...ient-snapshot-agent-podsecuritypolicy.yaml | 40 + .../client-snapshot-agent-serviceaccount.yaml | 14 + ...connect-inject-authmethod-clusterrole.yaml | 19 + ...-inject-authmethod-clusterrolebinding.yaml | 39 + ...nect-inject-authmethod-serviceaccount.yaml | 14 + .../templates/connect-inject-clusterrole.yaml | 37 + .../connect-inject-clusterrolebinding.yaml | 19 + .../templates/connect-inject-deployment.yaml | 200 ++++ .../connect-inject-mutatingwebhook.yaml | 30 + .../connect-inject-podsecuritypolicy.yaml | 38 + .../templates/connect-inject-service.yaml | 22 + .../connect-inject-serviceaccount.yaml | 12 + consul-helm/templates/dns-service.yaml | 34 + .../enterprise-license-clusterrole.yaml | 35 + ...enterprise-license-clusterrolebinding.yaml | 21 + .../templates/enterprise-license-job.yaml | 116 +++ .../enterprise-license-podsecuritypolicy.yaml | 37 + .../enterprise-license-serviceaccount.yaml | 14 + 
.../templates/mesh-gateway-clusterrole.yaml | 34 + .../mesh-gateway-clusterrolebinding.yaml | 20 + .../templates/mesh-gateway-deployment.yaml | 204 +++++ .../mesh-gateway-podsecuritypolicy.yaml | 39 + .../templates/mesh-gateway-service.yaml | 33 + .../mesh-gateway-serviceaccount.yaml | 13 + .../server-acl-init-cleanup-clusterrole.yaml | 25 + ...r-acl-init-cleanup-clusterrolebinding.yaml | 21 + .../server-acl-init-cleanup-job.yaml | 56 ++ ...er-acl-init-cleanup-podsecuritypolicy.yaml | 37 + ...erver-acl-init-cleanup-serviceaccount.yaml | 14 + .../server-acl-init-clusterrole.yaml | 50 + .../server-acl-init-clusterrolebinding.yaml | 21 + .../templates/server-acl-init-job.yaml | 130 +++ .../server-acl-init-podsecuritypolicy.yaml | 37 + .../server-acl-init-serviceaccount.yaml | 14 + consul-helm/templates/server-clusterrole.yaml | 22 + .../templates/server-clusterrolebinding.yaml | 19 + .../templates/server-config-configmap.yaml | 68 ++ .../templates/server-disruptionbudget.yaml | 21 + .../templates/server-podsecuritypolicy.yaml | 40 + consul-helm/templates/server-service.yaml | 69 ++ .../templates/server-serviceaccount.yaml | 12 + consul-helm/templates/server-statefulset.yaml | 248 +++++ .../templates/sync-catalog-clusterrole.yaml | 49 + .../sync-catalog-clusterrolebinding.yaml | 20 + .../templates/sync-catalog-deployment.yaml | 185 ++++ .../sync-catalog-podsecuritypolicy.yaml | 38 + .../sync-catalog-serviceaccount.yaml | 13 + consul-helm/templates/tests/test-runner.yaml | 61 ++ .../tls-init-cleanup-clusterrole.yaml | 35 + .../tls-init-cleanup-clusterrolebinding.yaml | 22 + .../templates/tls-init-cleanup-job.yaml | 54 ++ .../tls-init-cleanup-podsecuritypolicy.yaml | 36 + .../tls-init-cleanup-serviceaccount.yaml | 14 + .../templates/tls-init-clusterrole.yaml | 32 + .../tls-init-clusterrolebinding.yaml | 25 + consul-helm/templates/tls-init-job.yaml | 113 +++ .../templates/tls-init-podsecuritypolicy.yaml | 39 + .../templates/tls-init-serviceaccount.yaml | 17 + 
consul-helm/templates/ui-service.yaml | 39 + consul-helm/test/acceptance/_helpers.bash | 56 ++ consul-helm/test/acceptance/server.bats | 19 + consul-helm/test/docker/Test.dockerfile | 51 ++ consul-helm/test/terraform/main.tf | 71 ++ consul-helm/test/terraform/outputs.tf | 7 + .../test/terraform/service-account.yaml | 18 + consul-helm/test/terraform/variables.tf | 17 + consul-helm/test/unit/_helpers.bash | 4 + consul-helm/test/unit/client-clusterrole.bats | 104 +++ .../test/unit/client-clusterrolebinding.bats | 53 ++ consul-helm/test/unit/client-configmap.bats | 77 ++ consul-helm/test/unit/client-daemonset.bats | 855 +++++++++++++++++ .../test/unit/client-podsecuritypolicy.bats | 131 +++ .../test/unit/client-serviceaccount.bats | 53 ++ .../client-snapshot-agent-clusterrole.bats | 87 ++ ...ent-snapshot-agent-clusterrolebinding.bats | 44 + .../client-snapshot-agent-deployment.bats | 271 ++++++ ...ient-snapshot-agent-podsecuritypolicy.bats | 34 + .../client-snapshot-agent-serviceaccount.bats | 44 + ...connect-inject-authmethod-clusterrole.bats | 46 + ...-inject-authmethod-clusterrolebinding.bats | 46 + ...nect-inject-authmethod-serviceaccount.bats | 46 + .../test/unit/connect-inject-clusterrole.bats | 119 +++ .../connect-inject-clusterrolebinding.bats | 55 ++ .../test/unit/connect-inject-deployment.bats | 783 ++++++++++++++++ .../unit/connect-inject-mutatingwebhook.bats | 55 ++ .../connect-inject-podsecuritypolicy.bats | 44 + .../test/unit/connect-inject-service.bats | 44 + .../unit/connect-inject-serviceaccount.bats | 55 ++ consul-helm/test/unit/dns-service.bats | 89 ++ .../unit/enterprise-license-clusterrole.bats | 97 ++ ...enterprise-license-clusterrolebinding.bats | 55 ++ .../test/unit/enterprise-license-job.bats | 201 ++++ .../enterprise-license-podsecuritypolicy.bats | 68 ++ .../enterprise-license-serviceaccount.bats | 55 ++ consul-helm/test/unit/helpers.bats | 102 +++ .../test/unit/mesh-gateway-clusterrole.bats | 76 ++ 
.../unit/mesh-gateway-clusterrolebinding.bats | 38 + .../test/unit/mesh-gateway-deployment.bats | 656 +++++++++++++ .../unit/mesh-gateway-podsecuritypolicy.bats | 25 + .../test/unit/mesh-gateway-service.bats | 204 +++++ .../unit/mesh-gateway-serviceaccount.bats | 25 + .../server-acl-init-cleanup-clusterrole.bats | 58 ++ ...r-acl-init-cleanup-clusterrolebinding.bats | 44 + .../unit/server-acl-init-cleanup-job.bats | 65 ++ ...er-acl-init-cleanup-podsecuritypolicy.bats | 34 + ...erver-acl-init-cleanup-serviceaccount.bats | 44 + .../unit/server-acl-init-clusterrole.bats | 72 ++ .../server-acl-init-clusterrolebinding.bats | 44 + .../test/unit/server-acl-init-job.bats | 660 ++++++++++++++ .../server-acl-init-podsecuritypolicy.bats | 34 + .../unit/server-acl-init-serviceaccount.bats | 44 + consul-helm/test/unit/server-clusterrole.bats | 78 ++ .../test/unit/server-clusterrolebinding.bats | 53 ++ consul-helm/test/unit/server-configmap.bats | 167 ++++ .../test/unit/server-disruptionbudget.bats | 127 +++ .../test/unit/server-podsecuritypolicy.bats | 33 + consul-helm/test/unit/server-service.bats | 105 +++ .../test/unit/server-serviceaccount.bats | 53 ++ consul-helm/test/unit/server-statefulset.bats | 664 ++++++++++++++ .../test/unit/sync-catalog-clusterrole.bats | 106 +++ .../unit/sync-catalog-clusterrolebinding.bats | 53 ++ .../test/unit/sync-catalog-deployment.bats | 630 +++++++++++++ .../unit/sync-catalog-podsecuritypolicy.bats | 44 + .../unit/sync-catalog-serviceaccount.bats | 53 ++ consul-helm/test/unit/test-runner.bats | 22 + .../unit/tls-init-cleanup-clusterrole.bats | 67 ++ .../tls-init-cleanup-clusterrolebinding.bats | 55 ++ .../test/unit/tls-init-cleanup-job.bats | 55 ++ .../tls-init-cleanup-podsecuritypolicy.bats | 44 + .../unit/tls-init-cleanup-serviceaccount.bats | 55 ++ .../test/unit/tls-init-clusterrole.bats | 67 ++ .../unit/tls-init-clusterrolebinding.bats | 55 ++ consul-helm/test/unit/tls-init-job.bats | 112 +++ .../test/unit/tls-init-podsecuritypolicy.bats | 
44 + .../test/unit/tls-init-serviceaccount.bats | 55 ++ consul-helm/test/unit/ui-service.bats | 183 ++++ consul-helm/values.yaml | 862 ++++++++++++++++++ 157 files changed, 14508 insertions(+) create mode 100644 consul-helm/.circleci/config.yml create mode 100644 consul-helm/.helmignore create mode 100644 consul-helm/CHANGELOG.md create mode 100644 consul-helm/CONTRIBUTING.md create mode 100644 consul-helm/Chart.yaml create mode 100644 consul-helm/LICENSE.md create mode 100644 consul-helm/Makefile create mode 100644 consul-helm/README.md create mode 100644 consul-helm/helm-consul-values.yaml create mode 100644 consul-helm/templates/NOTES.txt create mode 100644 consul-helm/templates/_helpers.tpl create mode 100644 consul-helm/templates/client-clusterrole.yaml create mode 100644 consul-helm/templates/client-clusterrolebinding.yaml create mode 100644 consul-helm/templates/client-config-configmap.yaml create mode 100644 consul-helm/templates/client-daemonset.yaml create mode 100644 consul-helm/templates/client-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/client-serviceaccount.yaml create mode 100644 consul-helm/templates/client-snapshot-agent-clusterrole.yaml create mode 100644 consul-helm/templates/client-snapshot-agent-clusterrolebinding.yaml create mode 100644 consul-helm/templates/client-snapshot-agent-deployment.yaml create mode 100644 consul-helm/templates/client-snapshot-agent-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/client-snapshot-agent-serviceaccount.yaml create mode 100644 consul-helm/templates/connect-inject-authmethod-clusterrole.yaml create mode 100644 consul-helm/templates/connect-inject-authmethod-clusterrolebinding.yaml create mode 100644 consul-helm/templates/connect-inject-authmethod-serviceaccount.yaml create mode 100644 consul-helm/templates/connect-inject-clusterrole.yaml create mode 100644 consul-helm/templates/connect-inject-clusterrolebinding.yaml create mode 100644 
consul-helm/templates/connect-inject-deployment.yaml create mode 100644 consul-helm/templates/connect-inject-mutatingwebhook.yaml create mode 100644 consul-helm/templates/connect-inject-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/connect-inject-service.yaml create mode 100644 consul-helm/templates/connect-inject-serviceaccount.yaml create mode 100644 consul-helm/templates/dns-service.yaml create mode 100644 consul-helm/templates/enterprise-license-clusterrole.yaml create mode 100644 consul-helm/templates/enterprise-license-clusterrolebinding.yaml create mode 100644 consul-helm/templates/enterprise-license-job.yaml create mode 100644 consul-helm/templates/enterprise-license-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/enterprise-license-serviceaccount.yaml create mode 100644 consul-helm/templates/mesh-gateway-clusterrole.yaml create mode 100644 consul-helm/templates/mesh-gateway-clusterrolebinding.yaml create mode 100644 consul-helm/templates/mesh-gateway-deployment.yaml create mode 100644 consul-helm/templates/mesh-gateway-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/mesh-gateway-service.yaml create mode 100644 consul-helm/templates/mesh-gateway-serviceaccount.yaml create mode 100644 consul-helm/templates/server-acl-init-cleanup-clusterrole.yaml create mode 100644 consul-helm/templates/server-acl-init-cleanup-clusterrolebinding.yaml create mode 100644 consul-helm/templates/server-acl-init-cleanup-job.yaml create mode 100644 consul-helm/templates/server-acl-init-cleanup-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/server-acl-init-cleanup-serviceaccount.yaml create mode 100644 consul-helm/templates/server-acl-init-clusterrole.yaml create mode 100644 consul-helm/templates/server-acl-init-clusterrolebinding.yaml create mode 100644 consul-helm/templates/server-acl-init-job.yaml create mode 100644 consul-helm/templates/server-acl-init-podsecuritypolicy.yaml create mode 100644 
consul-helm/templates/server-acl-init-serviceaccount.yaml create mode 100644 consul-helm/templates/server-clusterrole.yaml create mode 100644 consul-helm/templates/server-clusterrolebinding.yaml create mode 100644 consul-helm/templates/server-config-configmap.yaml create mode 100644 consul-helm/templates/server-disruptionbudget.yaml create mode 100644 consul-helm/templates/server-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/server-service.yaml create mode 100644 consul-helm/templates/server-serviceaccount.yaml create mode 100644 consul-helm/templates/server-statefulset.yaml create mode 100644 consul-helm/templates/sync-catalog-clusterrole.yaml create mode 100644 consul-helm/templates/sync-catalog-clusterrolebinding.yaml create mode 100644 consul-helm/templates/sync-catalog-deployment.yaml create mode 100644 consul-helm/templates/sync-catalog-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/sync-catalog-serviceaccount.yaml create mode 100644 consul-helm/templates/tests/test-runner.yaml create mode 100644 consul-helm/templates/tls-init-cleanup-clusterrole.yaml create mode 100644 consul-helm/templates/tls-init-cleanup-clusterrolebinding.yaml create mode 100644 consul-helm/templates/tls-init-cleanup-job.yaml create mode 100644 consul-helm/templates/tls-init-cleanup-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/tls-init-cleanup-serviceaccount.yaml create mode 100644 consul-helm/templates/tls-init-clusterrole.yaml create mode 100644 consul-helm/templates/tls-init-clusterrolebinding.yaml create mode 100644 consul-helm/templates/tls-init-job.yaml create mode 100644 consul-helm/templates/tls-init-podsecuritypolicy.yaml create mode 100644 consul-helm/templates/tls-init-serviceaccount.yaml create mode 100644 consul-helm/templates/ui-service.yaml create mode 100644 consul-helm/test/acceptance/_helpers.bash create mode 100644 consul-helm/test/acceptance/server.bats create mode 100644 consul-helm/test/docker/Test.dockerfile 
create mode 100644 consul-helm/test/terraform/main.tf create mode 100644 consul-helm/test/terraform/outputs.tf create mode 100644 consul-helm/test/terraform/service-account.yaml create mode 100644 consul-helm/test/terraform/variables.tf create mode 100644 consul-helm/test/unit/_helpers.bash create mode 100644 consul-helm/test/unit/client-clusterrole.bats create mode 100644 consul-helm/test/unit/client-clusterrolebinding.bats create mode 100755 consul-helm/test/unit/client-configmap.bats create mode 100755 consul-helm/test/unit/client-daemonset.bats create mode 100644 consul-helm/test/unit/client-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/client-serviceaccount.bats create mode 100644 consul-helm/test/unit/client-snapshot-agent-clusterrole.bats create mode 100644 consul-helm/test/unit/client-snapshot-agent-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/client-snapshot-agent-deployment.bats create mode 100644 consul-helm/test/unit/client-snapshot-agent-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/client-snapshot-agent-serviceaccount.bats create mode 100644 consul-helm/test/unit/connect-inject-authmethod-clusterrole.bats create mode 100644 consul-helm/test/unit/connect-inject-authmethod-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/connect-inject-authmethod-serviceaccount.bats create mode 100644 consul-helm/test/unit/connect-inject-clusterrole.bats create mode 100644 consul-helm/test/unit/connect-inject-clusterrolebinding.bats create mode 100755 consul-helm/test/unit/connect-inject-deployment.bats create mode 100755 consul-helm/test/unit/connect-inject-mutatingwebhook.bats create mode 100644 consul-helm/test/unit/connect-inject-podsecuritypolicy.bats create mode 100755 consul-helm/test/unit/connect-inject-service.bats create mode 100644 consul-helm/test/unit/connect-inject-serviceaccount.bats create mode 100755 consul-helm/test/unit/dns-service.bats create mode 100644 
consul-helm/test/unit/enterprise-license-clusterrole.bats create mode 100644 consul-helm/test/unit/enterprise-license-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/enterprise-license-job.bats create mode 100644 consul-helm/test/unit/enterprise-license-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/enterprise-license-serviceaccount.bats create mode 100644 consul-helm/test/unit/helpers.bats create mode 100644 consul-helm/test/unit/mesh-gateway-clusterrole.bats create mode 100644 consul-helm/test/unit/mesh-gateway-clusterrolebinding.bats create mode 100755 consul-helm/test/unit/mesh-gateway-deployment.bats create mode 100644 consul-helm/test/unit/mesh-gateway-podsecuritypolicy.bats create mode 100755 consul-helm/test/unit/mesh-gateway-service.bats create mode 100644 consul-helm/test/unit/mesh-gateway-serviceaccount.bats create mode 100644 consul-helm/test/unit/server-acl-init-cleanup-clusterrole.bats create mode 100644 consul-helm/test/unit/server-acl-init-cleanup-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/server-acl-init-cleanup-job.bats create mode 100644 consul-helm/test/unit/server-acl-init-cleanup-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/server-acl-init-cleanup-serviceaccount.bats create mode 100644 consul-helm/test/unit/server-acl-init-clusterrole.bats create mode 100644 consul-helm/test/unit/server-acl-init-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/server-acl-init-job.bats create mode 100644 consul-helm/test/unit/server-acl-init-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/server-acl-init-serviceaccount.bats create mode 100644 consul-helm/test/unit/server-clusterrole.bats create mode 100644 consul-helm/test/unit/server-clusterrolebinding.bats create mode 100755 consul-helm/test/unit/server-configmap.bats create mode 100755 consul-helm/test/unit/server-disruptionbudget.bats create mode 100644 
consul-helm/test/unit/server-podsecuritypolicy.bats create mode 100755 consul-helm/test/unit/server-service.bats create mode 100644 consul-helm/test/unit/server-serviceaccount.bats create mode 100755 consul-helm/test/unit/server-statefulset.bats create mode 100755 consul-helm/test/unit/sync-catalog-clusterrole.bats create mode 100755 consul-helm/test/unit/sync-catalog-clusterrolebinding.bats create mode 100755 consul-helm/test/unit/sync-catalog-deployment.bats create mode 100644 consul-helm/test/unit/sync-catalog-podsecuritypolicy.bats create mode 100755 consul-helm/test/unit/sync-catalog-serviceaccount.bats create mode 100644 consul-helm/test/unit/test-runner.bats create mode 100644 consul-helm/test/unit/tls-init-cleanup-clusterrole.bats create mode 100644 consul-helm/test/unit/tls-init-cleanup-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/tls-init-cleanup-job.bats create mode 100644 consul-helm/test/unit/tls-init-cleanup-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/tls-init-cleanup-serviceaccount.bats create mode 100644 consul-helm/test/unit/tls-init-clusterrole.bats create mode 100644 consul-helm/test/unit/tls-init-clusterrolebinding.bats create mode 100644 consul-helm/test/unit/tls-init-job.bats create mode 100644 consul-helm/test/unit/tls-init-podsecuritypolicy.bats create mode 100644 consul-helm/test/unit/tls-init-serviceaccount.bats create mode 100755 consul-helm/test/unit/ui-service.bats create mode 100644 consul-helm/values.yaml diff --git a/consul-helm/.circleci/config.yml b/consul-helm/.circleci/config.yml new file mode 100644 index 0000000..e32fcfd --- /dev/null +++ b/consul-helm/.circleci/config.yml @@ -0,0 +1,51 @@ +version: 2 +jobs: + unit: + docker: + # This image is built from test/docker/Test.dockerfile + - image: hashicorpdev/consul-helm-test:0.3.0 + + steps: + - checkout + + - run: + name: Run Unit Tests + command: bats ./test/unit + + acceptance: + docker: + # This image is build from 
test/docker/Test.dockerfile + - image: hashicorpdev/consul-helm-test:0.3.0 + + steps: + - checkout + + - run: + name: terraform init & apply + command: | + terraform init test/terraform + echo "${GOOGLE_CREDENTIALS}" | gcloud auth activate-service-account --key-file=- + + terraform apply -var project=${CLOUDSDK_CORE_PROJECT} -var init_cli=true -auto-approve test/terraform + + - run: + name: Run acceptance tests + command: bats ./test/acceptance + + - run: + name: terraform destroy + command: | + terraform destroy -auto-approve + when: always + +workflows: + version: 2 + test: + jobs: + - unit + - acceptance: + requires: + - unit + filters: + branches: + only: master diff --git a/consul-helm/.helmignore b/consul-helm/.helmignore new file mode 100644 index 0000000..d1180d2 --- /dev/null +++ b/consul-helm/.helmignore @@ -0,0 +1,4 @@ +.git/ +.terraform/ +bin/ +test/ diff --git a/consul-helm/CHANGELOG.md b/consul-helm/CHANGELOG.md new file mode 100644 index 0000000..201a4ff --- /dev/null +++ b/consul-helm/CHANGELOG.md @@ -0,0 +1,389 @@ +## Unreleased + +BREAKING CHANGES: + +* `consul-k8s` `v0.12.0`+ is now required. The chart is passing new flags that are only available in this version. + To use this version if not using the chart defaults, set + ```yaml + global: + imageK8S: hashicorp/consul-k8s:0.12.0 + ``` + +IMPROVEMENTS: + +* Catalog Sync + * New Helm values have been added to configure which Kubernetes namespaces we will sync from. The defaults are shown below: + ```yaml + syncCatalog: + toConsul: true + k8sAllowNamespaces: ["*"] + k8sDenyNamespaces: ["kube-system", "kube-public"] + ``` + * If running Consul Enterprise 1.7.0+, Consul namespaces are supported. New Helm values have been added to allow configuring which + Consul namespaces Kubernetes services are synced to. 
See [https://www.consul.io/docs/platform/k8s/service-sync.html#consul-enterprise-namespaces](https://www.consul.io/docs/platform/k8s/service-sync.html#consul-enterprise-namespaces) for more details. + + ```yaml + global: + enableConsulNamespaces: true + syncCatalog: + consulNamespaces: + # consulDestinationNamespace is the name of the Consul namespace to register all + # k8s services into. If the Consul namespace does not already exist, + # it will be created. This will be ignored if `mirroringK8S` is true. + consulDestinationNamespace: "default" + + # mirroringK8S causes k8s services to be registered into a Consul namespace + # of the same name as their k8s namespace, optionally prefixed if + # `mirroringK8SPrefix` is set below. If the Consul namespace does not + # already exist, it will be created. Turning this on overrides the + # `consulDestinationNamespace` setting. + # `addK8SNamespaceSuffix` may no longer be needed if enabling this option. + mirroringK8S: false + + # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace + # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a + # service in the k8s `staging` namespace will be registered into the + # `k8s-staging` Consul namespace. + mirroringK8SPrefix: "" + ``` + +* Connect Inject + * New Helm values have been added to configure which Kubernetes namespaces we will inject pods in. The defaults are shown below: + ```yaml + connectInject: + k8sAllowNamespaces: ["*"] + k8sDenyNamespaces: [] + ``` + * If running Consul Enterprise 1.7.0+, Consul namespaces are supported. New Helm values have been added to allow configuring which Consul namespaces Kubernetes pods + are registered into. See [https://www.consul.io/docs/platform/k8s/connect.html#consul-enterprise-namespaces](https://www.consul.io/docs/platform/k8s/connect.html#consul-enterprise-namespaces) for more details. 
+ ```yaml + global: + enableConsulNamespaces: true + + connectInject: + consulNamespaces: + # consulDestinationNamespace is the name of the Consul namespace to register all + # k8s pods into. If the Consul namespace does not already exist, + # it will be created. This will be ignored if `mirroringK8S` is true. + consulDestinationNamespace: "default" + + # mirroringK8S causes k8s pods to be registered into a Consul namespace + # of the same name as their k8s namespace, optionally prefixed if + # `mirroringK8SPrefix` is set below. If the Consul namespace does not + # already exist, it will be created. Turning this on overrides the + # `consulDestinationNamespace` setting. + mirroringK8S: false + + # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace + # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a + # pod in the k8s `staging` namespace will be registered into the + # `k8s-staging` Consul namespace. + mirroringK8SPrefix: "" + ``` + +BUG FIXES: + +* Fix template rendering bug when setting `connectInject.overrideAuthMethodName` [[GH-342](https://github.com/hashicorp/consul-helm/pull/342)] +* Set `"consul.hashicorp.com/connect-inject": "false"` annotation on enterprise license job so it is not connect injected [[GH-343](https://github.com/hashicorp/consul-helm/pull/343)] + +DEPRECATIONS: + +* `.syncCatalog.k8sSourceNamespace` should no longer be used. Instead, use the new `.syncCatalog.k8sAllowNamespaces` and `.syncCatalog.k8sDenyNamespaces` features. For backward compatibility, if both this and the allow/deny lists are set, the allow/deny lists will be ignored. + +NOTES: + +* Bootstrap ACLs: Previously, ACL policies were not updated after creation. Now, if namespaces are enabled, they are updated every time the ACL bootstrapper is run so that any namespace config changes can be adjusted. This change is only an issue if you are updating ACL policies after creation. 
+ +## 0.16.2 (Jan 15, 2020) + +BUG FIXES: + + * Fix Helm Chart version. + +## 0.16.1 (Jan 14, 2020) + +BUG FIXES: + + * Fix a bug with the `tls-init` job, in which it could not correctly detect CA file + if Consul domain is provided [[GH-329](https://github.com/hashicorp/consul-helm/pull/329)]. + +## 0.16.0 (Jan 10, 2020) + +IMPROVEMENTS: + + * Optionally allow enabling TLS for Consul communication [[GH-313](https://github.com/hashicorp/consul-helm/pull/313)]. + If `global.tls.enabled` is set to `true`, the Helm chart will generate a CA and necessary certificates and + enable TLS for servers, clients, Connect injector, Mesh gateways, catalog sync, ACL bootstrapping, and snapshot agents. + + Note that this feature is only supported if both servers and clients are running + on Kubernetes. We will have better support for other deployment architectures, + as well as bringing your own CA, in the future. + + Also, note that simply turning on this feature and running `helm upgrade` will result in downtime if you are using + Consul Connect or Sync Catalog features. We will be adding instructions on how to do this upgrade without downtime soon. + Additionally, if you do decide to proceed with an upgrade despite downtime + and you're using Consul Connect, all application pods need to be recreated after upgrade, so that the Connect injector + can re-inject Envoy sidecars with TLS enabled. + + * Use the latest version of consul-k8s (0.11.0). + + * Add pod name as metadata to client nodes to help users map nodes in Consul to underlying client pods + [[GH-315](https://github.com/hashicorp/consul-helm/pull/315)]. + + * Rename `enterprise-licence.yaml` template to `enterprise-license-job.yaml` [[GH-321](https://github.com/hashicorp/consul-helm/pull/321)]. + +BUG FIXES: + + * Fix graceful termination for servers [[GH-313](https://github.com/hashicorp/consul-helm/pull/313)]. + `terminationGracePeriod` is now set to 30 seconds for the servers. 
The previous setting of 10 seconds + wasn't always enough time for a graceful leave, and in those cases, servers leave the cluster + in a "failed" state. Additionally, clients always set `leave_on_terminate` to `true`. + This replaces the `preStop` hook that was calling `consul leave`. Note that `leave_on_terminate` defaults + to true for clients as of Consul `0.7`, so this change only affects earlier versions. + + * Helm test runner now respects the provided namespace [[GH-320](https://github.com/hashicorp/consul-helm/pull/320)]. + + * Add pod security policies for the `enterprise-license` [[GH-325](https://github.com/hashicorp/consul-helm/pull/325)] + and the `server-acl-init` jobs [[GH-326](https://github.com/hashicorp/consul-helm/pull/325)]. + +## 0.15.0 (Dec 17, 2019) + +BREAKING CHANGES: + + * `connectInject.centralConfig` defaults to `true` now instead of `false`. This is to make it + easier to configure Connect via `service-defaults` and other routing + config [[GH-302](https://github.com/hashicorp/consul-helm/pull/302)]. + See https://www.consul.io/docs/agent/options.html#enable_central_service_config. + + If you wish to disable central config, set `connectInject.centralConfig` to + false in your local values file. NOTE: If `connectInject.enabled` is false, + then central config is not enabled so this change will not affect you. + + * Connect Inject: If using Connect Inject, you must also upgrade your `consul-k8s` version + to a version >= 0.10.1. A new flag is being passed in to `consul-k8s` which is not + supported in earlier versions. + +BUG FIXES: + * Fix bug with `fullnameOverride` and add new `global.name` setting for changing + the default prefix for resources. [[GH-286](https://github.com/hashicorp/consul-helm/issues/286)] + + * Connect Inject: Fix critical bug where Connect-registered services instances would be de-registered + when the Consul client on the same node was restarted. 
This fix adds a new + sidecar that ensures the service instance is always registered. [[GH-314](https://github.com/hashicorp/consul-helm/pull/314)] + +## 0.14.0 (Dec 10, 2019) + +IMPROVEMENTS: + + * Consul client DaemonSet can now use a [hostPath mount](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) + for its data directory by setting the `client.dataDirectoryHostPath` value. + This setting is currently necessary to ensure that when a Consul client Pod is deleted, + e.g. during a Consul version upgrade, it does not lose its Connect service + registrations. In the next version, we plan to have services automatically + re-register which will remove the need for this. [[GH-298](https://github.com/hashicorp/consul-helm/pull/298)] + (**Update:** 0.15.0 uses a version of consul-k8s that fixes this bug and so hostPath is longer necessary) + + **Security Warning:** If using this setting, Pod Security Policies *must* be enabled on your cluster + and in this Helm chart (via the `global.enablePodSecurityPolicies` setting) + to prevent other Pods from mounting the same host path and gaining + access to all of Consul's data. Consul's data is not encrypted at rest. + + * New configuration option `client.updateStrategy` allows setting the update + strategy for the Client DaemonSet. [[GH-298](https://github.com/hashicorp/consul-helm/pull/298)] + + * New configuration option `client.dnsPolicy` allows setting the DNS + policy for the Client DaemonSet. [[GH-298](https://github.com/hashicorp/consul-helm/pull/298)] + +## 0.13.0 (Dec 5, 2019) + +BREAKING CHANGES: + + * `client.grpc` defaults to `true` now instead of `false`. This is to make it + harder to misconfigure Connect. [[GH-282](https://github.com/hashicorp/consul-helm/pull/282)] + + If you do not wish to enable gRPC for clients, set `client.grpc` to + `false` in your local values file. + + * Add `syncCatalog.addK8SNamespaceSuffix` and default it to `true`. 
[[GH-280](https://github.com/hashicorp/consul-helm/pull/280)] + Note: upgrading an existing installation will result in deregistering + of existing synced services in Consul and registering them with a new name. + If you would like to avoid this behavior set `syncCatalog.addK8SNamespaceSuffix` + to `false`. + + This changes the default service names registered from Kubernetes into Consul. Previously, we would register all Kubernetes services, regardless of namespace, as the same service in Consul. After this change, the default behaviour is to append the Kubernetes namespace to the Consul service name. For example, given a Kubernetes service `foo` in the namespace `namespace`, it would be registered in Consul as `foo-namespace`. The name can also be controlled via the `consul.hashicorp.com/service-name` annotation. + +IMPROVEMENTS: + + * Use the latest version of consul (1.6.2) + * Use the latest version of consul-k8s (0.9.5) + * Add `connectInject.overrideAuthMethodName` to allow setting the `-acl-auth-method flag` [[GH-278](https://github.com/hashicorp/consul-helm/pull/278)] + * Support external to k8s Consul servers [[GH-289](https://github.com/hashicorp/consul-helm/pull/289)] + +BUG FIXES: + + * Do not run `server-acl-init` during server rollout [[GH-292](https://github.com/hashicorp/consul-helm/pull/292)] + +## 0.12.0 (Oct 28, 2019) + +IMPROVEMENTS: + + * Use the latest version of consul-k8s (0.9.4) + * Support `bootstrapACLs` when only servers are enabled (not clients) [[GH-250](https://github.com/hashicorp/consul-helm/pull/250)] + * Use less privileges for catalog sync when not syncing to k8s [[GH-248](https://github.com/hashicorp/consul-helm/pull/248)] + * Enable disabling tests for users using `helm template` [[GH-249](https://github.com/hashicorp/consul-helm/pull/249)] + +BUG FIXES: + + * Fix `missing required field "caBundle"` bug [[GH-213](https://github.com/hashicorp/consul-helm/issues/213)] + + +## 0.11.0 (Oct 15, 2019) + +IMPROVEMENTS: + + * Use the 
latest version of Consul (1.6.1) + +BUG FIXES: + + * Use the latest version of `consul-k8s` (0.9.3) which fixes issues with upgrading between Helm chart + versions when `bootstrapACLs` is enabled [[GH-246](https://github.com/hashicorp/consul-helm/pull/246)]. + * Add `server-acl-init-cleanup` job to clean up the `server-acl-init` job + when it completes successfully [[GH-246](https://github.com/hashicorp/consul-helm/pull/246)]. + * Add the ability to specify Consul client daemonset affinity [[GH-165](https://github.com/hashicorp/consul-helm/pull/165)] + +## 0.10.0 (Oct 4, 2019) + +IMPROVEMENTS: + + * Use latest version of Consul (1.6.0) and consul-k8s (0.9.2) + * Remove random value from `helm test` to enable helmfile use [[GH-143](https://github.com/hashicorp/consul-helm/pull/143)] + +BUG FIXES: + + * The latest version of `consul-k8s` fixes issues with the `server-acl-init` + job failing repeatedly. + +## 0.9.0 (Sep 6, 2019) + +IMPROVEMENTS: + + * Support running the consul snapshot agent + * Support mesh gateways + * Allow setting annotations for the DNS service + * Allow setting `-consul-write-interval`, `-log-level` and `-k8s-source-namespace` flags for consul-k8s sync + * Allow setting DNS service IP + * Fix issues where acl-init job would fail repeatedly and ACLs would not be + bootstrapped + +BUG FIXES: + + * Fix enterprise license application when ACLs are turned off + * `rules` key must always be set (fixes https://github.com/hashicorp/consul-helm/issues/178) + +## 0.8.1 (May 9, 2019) + +IMPROVEMENTS: + + * Update default consul-k8s version to 0.8.1 for a central config bug fix + +## 0.8.0 (May 8, 2019) + +IMPROVEMENTS: + + * Support adding a prefix to Kubernetes services registered in Consul [[GH 140](https://github.com/hashicorp/consul-helm/issues/140)] + * Support an option for automatically bootstrapping ACLs in a Consul cluster that is run fully in Kubernetes. 
If connectInject is enabled with this option on, this also automatically configures a new Kubernetes AuthMethod so that injected services are automatically granted ACL tokens based on their Kubernetes service account. + * Support central service configuration including proxy defaults in Connect (available in Consul 1.5+). + * Remove the `gossipEncryption.enabled` option and instead have the implementation based on the existence of the secretName and secretKey. + +## 0.7.0 (March 21, 2019) + +BREAKING CHANGES: + + * If previously setting the release name to `consul`, you must now set `fullnameOverride: consul` in your config to prevent all resources being renamed. + +IMPROVEMENTS: + + * Support pod PriorityClasses for Consul servers and clients + * Add annotation and additional spec values for the UI service + * Add liveness and readiness checks to the catalog sync pod [[consul-k8s GH 57](https://github.com/hashicorp/consul-k8s/issues/57)] + * Support custom annotations for Consul clients and servers + * Support PodSecurityPolicies for Consul components + * Add service accounts and cluster roles/role bindings for each Consul component + * Add the namespace to the metadata volume name + * Support tolerations on Consul client and server pods + * Support gossip protocol encryption + * Allows custom environment variables for Consul client and server pods + * Support nodeSelectors for all components + +BUG FIXES: + + * Allow setting `extraConfig` variables using Helm's `--set` flag [[GH 74](https://github.com/hashicorp/consul-helm/issues/74)] + * Fix a formatting bug in the enterprise license command + +## 0.6.0 (February 8, 2019) + +IMPROVEMENTS: + + * Supports applying a Consul Enterprise License to the cluster through the Helm chart + * Support assigning an ACL token to the catalog sync process [[GH 26](https://github.com/hashicorp/consul-k8s/issues/26)] + * Updates default `consul` version to `1.4.2` and `consul-k8s` version to `0.5.0` + +BUG FIXES: + + * Switch the 
chart labels to a non-changing value to allow helm upgrades [[GH 86](https://github.com/hashicorp/consul-helm/issues/86)] + +## 0.5.0 (January 11, 2019) + +IMPROVEMENTS: + + * Supports new NodePort syncing style that uses the node ip address + * Adds a configurable tab to the Kubernetes -> Consul sync + +## 0.4.0 (December 7, 2018) + +IMPROVEMENTS: + + * RBAC support for `syncCatalog`. This will create the `ClusterRole`, `ClusterRoleBinding` + and `ServiceAccount` that is necessary for the catalog sync. [[GH-20](https://github.com/hashicorp/consul-helm/issues/20)] + * client: agents now have the node name set to the actual K8S node name [[GH-14](https://github.com/hashicorp/consul-helm/issues/14)] + * RBAC support for `connectInject`. This will create a `ClusterRole`, `ClusterRoleBinding`, + and `ServiceAccount` that is necessary for the connect injector to automatically generate + TLS certificates to interact with the Kubernetes API. + * Server affinity is now configurable. This makes it easier to run an entire + Consul cluster on Minikube. [[GH-13](https://github.com/hashicorp/consul-helm/issues/13)] + * Liveness probes are now http calls, reducing errors in the logs. + * All namespaced resources now specify the namespace metadata, making `helm template` usage in + a non-default namespace easier. [[GH-66](https://github.com/hashicorp/consul-helm/issues/66)] + * Add support for ClusterIP service syncing. + +BUG FIXES: + + * Add catalog sync default behavior flag to the chart [GH-28] + * Updated images to point to latest versions for 0.3.0. + * Add missing continuation characters to long commands [[GH-26](https://github.com/hashicorp/consul-helm/issues/26)]. + * connectInject: set the correct namespace for the MutatingWebhookConfiguration + so that deployments work in non-default namespaces. [[GH-38](https://github.com/hashicorp/consul-helm/issues/38)] + * Provide a valid `maxUnavailable` value when replicas=1. 
[[GH-58](https://github.com/hashicorp/consul-helm/issues/58)] + * Correctly sets server resource requirements. + * Update the `maxUnavailable` default calculation to allow rolling updates on 3 server clusters. [[GH-71](https://github.com/hashicorp/consul-helm/issues/71)] + +## 0.3.0 (October 11, 2018) + +FEATURES: + + * `connectInject` can install the automatic Connect sidecar injector. + +## 0.2.0 (September 26, 2018) + +FEATURES: + + * `syncCatalog` can install the [service catalog sync](https://www.hashicorp.com/blog/consul-and-kubernetes-service-catalog-sync) + functionality. + +IMPROVEMENTS: + + * server: support `storageClass` [[GH-7](https://github.com/hashicorp/consul-helm/issues/7)] + +## 0.1.0 + +Initial release diff --git a/consul-helm/CONTRIBUTING.md b/consul-helm/CONTRIBUTING.md new file mode 100644 index 0000000..25bc3ce --- /dev/null +++ b/consul-helm/CONTRIBUTING.md @@ -0,0 +1,166 @@ +# Contributing + +## Rebasing contributions against master + +PRs in this repo are merged using the [`rebase`](https://git-scm.com/docs/git-rebase) method. This keeps +the git history clean by adding the PR commits to the most recent end of the commit history. It also has +the benefit of keeping all the relevant commits for a given PR together, rather than spread throughout the +git history based on when the commits were first created. + +If the changes in your PR do not conflict with any of the existing code in the project, then Github supports +automatic rebasing when the PR is accepted into the code. However, if there are conflicts (there will be +a warning on the PR that reads "This branch cannot be rebased due to conflicts"), you will need to manually +rebase the branch on master, fixing any conflicts along the way before the code can be merged. + +## Testing + +The Helm chart ships with both unit and acceptance tests. + +The unit tests don't require any active Kubernetes cluster and complete +very quickly. 
These should be used for fast feedback during development. +The acceptance tests require a Kubernetes cluster with a configured `kubectl`. + +### Prerequisites +* [Bats](https://github.com/bats-core/bats-core) + ```bash + brew install bats-core + ``` +* [yq](https://pypi.org/project/yq/) + ```bash + brew install python-yq + ``` +* [helm](https://helm.sh) + ```bash + brew install kubernetes-helm + ``` + +### Running The Tests +To run the unit tests: + + bats ./test/unit + +To run the acceptance tests: + + bats ./test/acceptance + +If the acceptance tests fail, deployed resources in the Kubernetes cluster +may not be properly cleaned up. We recommend recycling the Kubernetes cluster to +start from a clean slate. + +**Note:** There is a Terraform configuration in the +[`test/terraform/`](https://github.com/hashicorp/consul-helm/tree/master/test/terraform) directory +that can be used to quickly bring up a GKE cluster and configure +`kubectl` and `helm` locally. This can be used to quickly spin up a test +cluster for acceptance tests. Unit tests _do not_ require a running Kubernetes +cluster. + +### Writing Unit Tests + +Changes to the Helm chart should be accompanied by appropriate unit tests. + +#### Formatting + +- Put tests in the test file in the same order as the variables appear in the `values.yaml`. +- Start tests for a chart value with a header that says what is being tested, like this: + ``` + #-------------------------------------------------------------------- + # annotations + ``` + +- Name the test based on what it's testing in the following format (this will be its first line): + ``` + @test "
: " { + ``` + + When adding tests to an existing file, the first section will be the same as the other tests in the file. + +#### Test Details + +[Bats](https://github.com/bats-core/bats-core) provides a way to run commands in a shell and inspect the output in an automated way. +In all of the tests in this repo, the base command being run is [helm template](https://docs.helm.sh/helm/#helm-template) which turns the templated files into straight yaml output. +In this way, we're able to test that the various conditionals in the templates render as we would expect. + +Each test defines the files that should be rendered using the `-x` flag, then it might adjust chart values by adding `--set` flags as well. +The output from this `helm template` command is then piped to [yq](https://pypi.org/project/yq/). +`yq` allows us to pull out just the information we're interested in, either by referencing its position in the yaml file directly or giving information about it (like its length). +The `-r` flag can be used with `yq` to return a raw string instead of a quoted one which is especially useful when looking for an exact match. + +The test passes or fails based on the conditional at the end that is in square brackets, which is a comparison of our expected value and the output of `helm template` piped to `yq`. + +The `| tee /dev/stderr ` pieces direct any terminal output of the `helm template` and `yq` commands to stderr so that it doesn't interfere with `bats`. + +#### Test Examples + +Here are some examples of common test patterns: + +- Check that a value is disabled by default + + ``` + @test "ui/Service: no type by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "null" ] + } + ``` + + In this example, nothing is changed from the default templates (no `--set` flags), then we use `yq` to retrieve the value we're checking, `.spec.type`. 
+ This output is then compared against our expected value (`null` in this case) in the assertion `[ "${actual}" = "null" ]`. + + +- Check that a template value is rendered to a specific value + ``` + @test "ui/Service: specified type" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.service.type=LoadBalancer' \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "LoadBalancer" ] + } + ``` + + This is very similar to the last example, except we've changed a default value with the `--set` flag and correspondingly changed the expected value. + +- Check that a template value contains several values + ``` + @test "syncCatalog/Deployment: to-k8s only" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toConsul=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-consul=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toConsul=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-k8s"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + } + ``` + In this case, the same command is run twice in the same test. + This can be used to look for several things in the same field, or to check that something is not present that shouldn't be. + + *Note:* If testing more than two conditions, it would be good to separate the `helm template` part of the command from the `yq` sections to reduce redundant work. + +- Check that an entire template file is not rendered + ``` + @test "syncCatalog/Deployment: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] + } + ``` + Here we check the length of the command output to see if anything is rendered. + This style can easily be switched to check that a file is rendered instead. diff --git a/consul-helm/Chart.yaml b/consul-helm/Chart.yaml new file mode 100644 index 0000000..dc44a42 --- /dev/null +++ b/consul-helm/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +name: consul +version: 0.17.0 +description: Install and configure Consul on Kubernetes. +home: https://www.consul.io +sources: + - https://github.com/hashicorp/consul + - https://github.com/hashicorp/consul-helm + - https://github.com/hashicorp/consul-k8s diff --git a/consul-helm/LICENSE.md b/consul-helm/LICENSE.md new file mode 100644 index 0000000..82b4de9 --- /dev/null +++ b/consul-helm/LICENSE.md @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/consul-helm/Makefile b/consul-helm/Makefile new file mode 100644 index 0000000..ec8cd75 --- /dev/null +++ b/consul-helm/Makefile @@ -0,0 +1,6 @@ +TEST_IMAGE?=consul-helm-test + +test-docker: + @docker build --rm -t '$(TEST_IMAGE)' -f $(CURDIR)/test/docker/Test.dockerfile $(CURDIR) + +.PHONY: test-docker diff --git a/consul-helm/README.md b/consul-helm/README.md new file mode 100644 index 0000000..ea999cc --- /dev/null +++ b/consul-helm/README.md @@ -0,0 +1,38 @@ +# Consul Helm Chart + +This repository contains the official HashiCorp Helm chart for installing +and configuring Consul on Kubernetes. This chart supports multiple use +cases of Consul on Kubernetes depending on the values provided. + +For full documentation on this Helm chart along with all the ways you can +use Consul with Kubernetes, please see the +[Consul and Kubernetes documentation](https://www.consul.io/docs/platform/k8s/index.html). + +## Prerequisites + +To use the charts here, [Helm](https://helm.sh/) must be installed in your +Kubernetes cluster. Setting up Kubernetes and Helm is outside the scope +of this README. Please refer to the Kubernetes and Helm documentation. + +The versions required are: + + * **Helm 2.10+** - This is the earliest version of Helm tested. It is possible + it works with earlier versions but this chart is untested for those versions. + * **Kubernetes 1.9+** - This is the earliest version of Kubernetes tested. + It is possible that this chart works with earlier versions but it is + untested. + +## Usage + +For now, we do not host a chart repository. To use the charts, you must +download this repository and unpack it into a directory. Either +[download a tagged release](https://github.com/hashicorp/consul-helm/releases) or +use `git checkout` to a tagged release. 
+Assuming this repository was unpacked into the directory `consul-helm`, the chart can +then be installed directly: + + helm install ./consul-helm + +Please see the many options supported in the `values.yaml` +file. These are also fully documented directly on the +[Consul website](https://www.consul.io/docs/platform/k8s/helm.html). diff --git a/consul-helm/helm-consul-values.yaml b/consul-helm/helm-consul-values.yaml new file mode 100644 index 0000000..0a42049 --- /dev/null +++ b/consul-helm/helm-consul-values.yaml @@ -0,0 +1,25 @@ +# Choose an optional name for the datacenter +global: + datacenter: minidc + +# Enable the Consul Web UI via a NodePort +ui: + service: + type: 'NodePort' + +# Enable Connect for secure communication between nodes and allow injection on namespaces +connectInject: + enabled: true + k8sAllowNamespaces: ["*"] + k8sDenyNamespaces: [] + +client: + enabled: true + +# Use only one Consul server for local development +server: + replicas: 1 + bootstrapExpect: 1 + disruptionBudget: + enabled: true + maxUnavailable: 0 diff --git a/consul-helm/templates/NOTES.txt b/consul-helm/templates/NOTES.txt new file mode 100644 index 0000000..26d90c9 --- /dev/null +++ b/consul-helm/templates/NOTES.txt @@ -0,0 +1,20 @@ + +Thank you for installing HashiCorp Consul! + +Now that you have deployed Consul, you should look over the docs on using +Consul with Kubernetes available here: + +https://www.consul.io/docs/platform/k8s/index.html + + +Your release is named {{ .Release.Name }}. To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get {{ .Release.Name }} + + +{{- if (and .Values.global.bootstrapACLs (gt (len .Values.server.extraConfig) 3)) }} +Warning: Defining server extraConfig potentially disrupts the automatic ACL + bootstrapping required settings. This may cause future issues if + there are conflicts. 
+{{- end }} diff --git a/consul-helm/templates/_helpers.tpl b/consul-helm/templates/_helpers.tpl new file mode 100644 index 0000000..fe11cdf --- /dev/null +++ b/consul-helm/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). Supports the legacy fullnameOverride setting +as well as the global.name setting. +*/}} +{{- define "consul.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.global.name -}} +{{- .Values.global.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "consul.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "consul.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Compute the maximum number of unavailable replicas for the PodDisruptionBudget. +This defaults to (n/2)-1 where n is the number of members of the server cluster. +Special case of replica equaling 3 and allowing a minor disruption of 1 otherwise +use the integer value +Add a special case for replicas=1, where it should default to 0 as well. 
+*/}} +{{- define "consul.pdb.maxUnavailable" -}} +{{- if eq (int .Values.server.replicas) 1 -}} +{{ 0 }} +{{- else if .Values.server.disruptionBudget.maxUnavailable -}} +{{ .Values.server.disruptionBudget.maxUnavailable -}} +{{- else -}} +{{- if eq (int .Values.server.replicas) 3 -}} +{{- 1 -}} +{{- else -}} +{{- sub (div (int .Values.server.replicas) 2) 1 -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "consul.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/consul-helm/templates/client-clusterrole.yaml b/consul-helm/templates/client-clusterrole.yaml new file mode 100644 index 0000000..f42afc9 --- /dev/null +++ b/consul-helm/templates/client-clusterrole.yaml @@ -0,0 +1,33 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-client + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if (or .Values.global.enablePodSecurityPolicies .Values.global.bootstrapACLs) }} +rules: +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-client + verbs: + - use +{{- end }} +{{- if .Values.global.bootstrapACLs }} + - apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . 
}}-client-acl-token + verbs: + - get +{{- end }} +{{- else}} +rules: [] +{{- end }} +{{- end }} diff --git a/consul-helm/templates/client-clusterrolebinding.yaml b/consul-helm/templates/client-clusterrolebinding.yaml new file mode 100644 index 0000000..d769b24 --- /dev/null +++ b/consul-helm/templates/client-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-client + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-client +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-client + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/consul-helm/templates/client-config-configmap.yaml b/consul-helm/templates/client-config-configmap.yaml new file mode 100644 index 0000000..e96f50f --- /dev/null +++ b/consul-helm/templates/client-config-configmap.yaml @@ -0,0 +1,23 @@ +# ConfigMap with extra configuration specified directly to the chart +# for client agents only. +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "consul.fullname" . }}-client-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + extra-from-values.json: |- +{{ tpl .Values.client.extraConfig . 
| trimAll "\"" | indent 4 }} + {{- if (and .Values.connectInject.enabled .Values.connectInject.centralConfig.enabled) }} + central-config.json: |- + { + "enable_central_service_config": true + } + {{- end }} +{{- end }} diff --git a/consul-helm/templates/client-daemonset.yaml b/consul-helm/templates/client-daemonset.yaml new file mode 100644 index 0000000..f00d08c --- /dev/null +++ b/consul-helm/templates/client-daemonset.yaml @@ -0,0 +1,333 @@ +# DaemonSet to run the Consul clients on every node. +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "consul.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + {{- if .Values.client.updateStrategy }} + updateStrategy: + {{ tpl .Values.client.updateStrategy . | nindent 4 | trim }} + {{- end }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client + hasDNS: "true" + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client + hasDNS: "true" + annotations: + "consul.hashicorp.com/connect-inject": "false" + {{- if .Values.client.annotations }} + {{- tpl .Values.client.annotations . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.client.affinity }} + affinity: + {{ tpl .Values.client.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.client.tolerations }} + tolerations: + {{ tpl .Values.client.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 10 + serviceAccountName: {{ template "consul.fullname" . 
}}-client + + {{- if .Values.client.priorityClassName }} + priorityClassName: {{ .Values.client.priorityClassName | quote }} + {{- end }} + + {{- if .Values.client.dnsPolicy }} + dnsPolicy: {{ .Values.client.dnsPolicy }} + {{- end }} + + volumes: + - name: data + {{- if .Values.client.dataDirectoryHostPath }} + hostPath: + path: {{ .Values.client.dataDirectoryHostPath }} + type: DirectoryOrCreate + {{- else }} + emptyDir: {} + {{- end }} + - name: config + configMap: + name: {{ template "consul.fullname" . }}-client-config + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + - name: consul-ca-key + secret: + {{- if .Values.global.tls.caKey.secretName }} + secretName: {{ .Values.global.tls.caKey.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-key + {{- end }} + items: + - key: {{ default "tls.key" .Values.global.tls.caKey.secretKey }} + path: tls.key + - name: tls-client-cert + emptyDir: + # We're using tmpfs here so that + # client certs are not written to disk + medium: "Memory" + {{- end }} + {{- range .Values.client.extraVolumes }} + - name: userconfig-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- end }} + {{- end }} + {{- if .Values.global.bootstrapACLs }} + - name: aclconfig + emptyDir: {} + {{- end }} + containers: + - name: consul + image: "{{ default .Values.global.image .Values.client.image }}" + env: + - name: ADVERTISE_IP + valueFrom: + fieldRef: + {{- if not .Values.client.exposeGossipPorts }} + fieldPath: status.podIP + {{- else }} + # Clients will be exposed on their node's hostPort for external-to-k8s communication, + # so they need to advertise their host ip instead of their pod ip. + fieldPath: status.hostIP + {{- end }} + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} + - name: GOSSIP_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.global.gossipEncryption.secretName }} + key: {{ .Values.global.gossipEncryption.secretKey }} + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://localhost:8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + {{- include "consul.extraEnvironmentVars" .Values.client | nindent 12 }} + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="{{template "consul.fullname" . 
}}" + + exec /bin/consul agent \ + -node="${NODE}" \ + -advertise="${ADVERTISE_IP}" \ + -bind=0.0.0.0 \ + -client=0.0.0.0 \ + -node-meta=pod-name:${HOSTNAME} \ + -hcl='leave_on_terminate = true' \ + {{- if .Values.global.tls.enabled }} + -hcl='ca_file = "/consul/tls/ca/tls.crt"' \ + -hcl='cert_file = "/consul/tls/client/tls.crt"' \ + -hcl='key_file = "/consul/tls/client/tls.key"' \ + {{- if .Values.global.tls.verify }} + -hcl='verify_incoming_rpc = true' \ + -hcl='verify_outgoing = true' \ + -hcl='verify_server_hostname = true' \ + {{- end }} + -hcl='ports { https = 8501 }' \ + {{- if .Values.global.tls.httpsOnly }} + -hcl='ports { http = -1 }' \ + {{- end }} + {{- end }} + {{- if .Values.client.grpc }} + -hcl='ports { grpc = 8502 }' \ + {{- end }} + -config-dir=/consul/config \ + {{- range .Values.client.extraVolumes }} + {{- if .load }} + -config-dir=/consul/userconfig/{{ .name }} \ + {{- end }} + {{- end }} + {{- if .Values.global.bootstrapACLs}} + -config-dir=/consul/aclconfig \ + {{- end }} + -datacenter={{ .Values.global.datacenter }} \ + -data-dir=/consul/data \ + {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} + -encrypt="${GOSSIP_KEY}" \ + {{- end }} + {{- if (and .Values.client.join (gt (len .Values.client.join) 0)) }} + {{- range $value := .Values.client.join }} + -retry-join="{{ $value }}" \ + {{- end }} + {{- else }} + {{- if .Values.server.enabled }} + {{- range $index := until (.Values.server.replicas | int) }} + -retry-join=${CONSUL_FULLNAME}-server-{{ $index }}.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc \ + {{- end }} + {{- end }} + {{- end }} + -domain={{ .Values.global.domain }} + volumeMounts: + - name: data + mountPath: /consul/data + - name: config + mountPath: /consul/config + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + - name: tls-client-cert + mountPath: /consul/tls/client + readOnly: true + {{- end }} + {{- range 
.Values.client.extraVolumes }} + - name: userconfig-{{ .name }} + readOnly: true + mountPath: /consul/userconfig/{{ .name }} + {{- end }} + {{- if .Values.global.bootstrapACLs}} + - name: aclconfig + mountPath: /consul/aclconfig + {{- end }} + ports: + {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }} + - containerPort: 8500 + hostPort: 8500 + name: http + {{- end }} + {{- if .Values.global.tls.enabled }} + - containerPort: 8501 + hostPort: 8501 + name: https + {{- end }} + - containerPort: 8502 + hostPort: 8502 + name: grpc + - containerPort: 8301 + {{- if .Values.client.exposeGossipPorts }} + hostPort: 8301 + {{- end }} + protocol: "TCP" + name: serflan-tcp + - containerPort: 8301 + {{- if .Values.client.exposeGossipPorts }} + hostPort: 8301 + {{- end }} + protocol: "UDP" + name: serflan-udp + - containerPort: 8302 + name: serfwan + - containerPort: 8300 + name: server + - containerPort: 8600 + name: dns-tcp + protocol: "TCP" + - containerPort: 8600 + name: dns-udp + protocol: "UDP" + readinessProbe: + # NOTE(mitchellh): when our HTTP status endpoints support the + # proper status codes, we should switch to that. This is temporary. + exec: + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.tls.enabled }} + curl \ + --cacert /consul/tls/ca/tls.crt \ + https://127.0.0.1:8501/v1/status/leader \ + {{- else }} + curl http://127.0.0.1:8500/v1/status/leader \ + {{- end }} + 2>/dev/null | grep -E '".+"' + {{- if .Values.client.resources }} + resources: + {{ tpl .Values.client.resources . | nindent 12 | trim }} + {{- end }} + {{- if (or .Values.global.bootstrapACLs .Values.global.tls.enabled) }} + initContainers: + {{- if .Values.global.bootstrapACLs }} + - name: client-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template "consul.fullname" . 
}}-client-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="client" + volumeMounts: + - name: aclconfig + mountPath: /consul/aclconfig + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: client-tls-init + image: "{{ default .Values.global.image .Values.client.image }}" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + command: + - "/bin/sh" + - "-ec" + - | + cd /consul/tls/client + consul tls cert create -client \ + -additional-ipaddress=${HOST_IP} \ + -dc={{ .Values.global.datacenter }} \ + -domain={{ .Values.global.domain }} \ + -ca=/consul/tls/ca/cert/tls.crt \ + -key=/consul/tls/ca/key/tls.key + mv {{ .Values.global.datacenter }}-client-{{ .Values.global.domain }}-0.pem tls.crt + mv {{ .Values.global.datacenter }}-client-{{ .Values.global.domain }}-0-key.pem tls.key + volumeMounts: + - name: tls-client-cert + mountPath: /consul/tls/client + - name: consul-ca-cert + mountPath: /consul/tls/ca/cert + readOnly: true + - name: consul-ca-key + mountPath: /consul/tls/ca/key + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.client.nodeSelector }} + nodeSelector: + {{ tpl .Values.client.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/client-podsecuritypolicy.yaml b/consul-helm/templates/client-podsecuritypolicy.yaml new file mode 100644 index 0000000..9d46a23 --- /dev/null +++ b/consul-helm/templates/client-podsecuritypolicy.yaml @@ -0,0 +1,66 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-client + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + {{- if .Values.client.dataDirectoryHostPath }} + - 'hostPath' + {{- end }} + hostNetwork: false + hostPorts: + {{- if (not (and .Values.global.tls.enabled .Values.global.tls.httpsOnly)) }} + # HTTP Port + - min: 8500 + max: 8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + # HTTPS port + - min: 8501 + max: 8501 + {{- end }} + {{- if .Values.client.grpc }} + # gRPC Port + - min: 8502 + max: 8502 + {{- end }} + {{- if .Values.client.exposeGossipPorts }} + - min: 8301 + max: 8301 + {{- end }} + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + {{- if .Values.client.dataDirectoryHostPath }} + allowedHostPaths: + - pathPrefix: {{ .Values.client.dataDirectoryHostPath | quote }} + readOnly: false + {{- end }} +{{- end }} diff --git a/consul-helm/templates/client-serviceaccount.yaml b/consul-helm/templates/client-serviceaccount.yaml new file mode 100644 index 0000000..dacdf8f --- /dev/null +++ b/consul-helm/templates/client-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-client + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/consul-helm/templates/client-snapshot-agent-clusterrole.yaml b/consul-helm/templates/client-snapshot-agent-clusterrole.yaml new file mode 100644 index 0000000..0430b3e --- /dev/null +++ b/consul-helm/templates/client-snapshot-agent-clusterrole.yaml @@ -0,0 +1,36 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if not (or .Values.global.enablePodSecurityPolicies .Values.global.bootstrapACLs) }} +rules: [] +{{- else }} +rules: +{{- end }} +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-snapshot-agent + verbs: + - use +{{- end }} +{{- if .Values.global.bootstrapACLs }} + - apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . 
}}-client-snapshot-agent-acl-token + verbs: + - get +{{- end }} +{{- else }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/client-snapshot-agent-clusterrolebinding.yaml b/consul-helm/templates/client-snapshot-agent-clusterrolebinding.yaml new file mode 100644 index 0000000..b4a2755 --- /dev/null +++ b/consul-helm/templates/client-snapshot-agent-clusterrolebinding.yaml @@ -0,0 +1,21 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-snapshot-agent +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/client-snapshot-agent-deployment.yaml b/consul-helm/templates/client-snapshot-agent-deployment.yaml new file mode 100644 index 0000000..ec1cf52 --- /dev/null +++ b/consul-helm/templates/client-snapshot-agent-deployment.yaml @@ -0,0 +1,140 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.client.snapshotAgent.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + {{- if .Values.client.tolerations }} + tolerations: + {{ tpl .Values.client.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 10 + serviceAccountName: {{ template "consul.fullname" . }}-snapshot-agent + {{- if .Values.client.priorityClassName }} + priorityClassName: {{ .Values.client.priorityClassName | quote }} + {{- end }} + {{- if (or .Values.global.bootstrapACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey)) }} + volumes: + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} + - name: snapshot-config + secret: + secretName: {{ .Values.client.snapshotAgent.configSecret.secretName }} + items: + - key: {{ .Values.client.snapshotAgent.configSecret.secretKey }} + path: snapshot-config.json + {{- end }} + {{- if .Values.global.bootstrapACLs }} + - name: aclconfig + emptyDir: {} + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} + containers: + - name: consul-snapshot-agent + image: "{{ default .Values.global.image .Values.client.image }}" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- if .Values.global.bootstrapACLs }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: "{{ template "consul.fullname" . }}-client-snapshot-agent-acl-token" + key: "token" + {{- end}} + command: + - "/bin/sh" + - "-ec" + - | + exec /bin/consul snapshot agent \ + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} + -config-dir=/consul/config \ + {{- end }} + {{- if .Values.global.bootstrapACLs}} + -config-dir=/consul/aclconfig \ + {{- end }} + {{- if (or .Values.global.bootstrapACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) ) }} + volumeMounts: + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} + - name: snapshot-config + readOnly: true + mountPath: /consul/config + {{- end }} + {{- if .Values.global.bootstrapACLs}} + - name: aclconfig + mountPath: /consul/aclconfig + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.global.bootstrapACLs }} + initContainers: + - name: client-snapshot-agent-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template 
"consul.fullname" . }}-client-snapshot-agent-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="sync" + volumeMounts: + - name: aclconfig + mountPath: /consul/aclconfig + {{- end }} + {{- if .Values.client.nodeSelector }} + nodeSelector: + {{ tpl .Values.client.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/client-snapshot-agent-podsecuritypolicy.yaml b/consul-helm/templates/client-snapshot-agent-podsecuritypolicy.yaml new file mode 100644 index 0000000..a84fa38 --- /dev/null +++ b/consul-helm/templates/client-snapshot-agent-podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled))) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/consul-helm/templates/client-snapshot-agent-serviceaccount.yaml b/consul-helm/templates/client-snapshot-agent-serviceaccount.yaml new file mode 100644 index 0000000..8e73401 --- /dev/null +++ b/consul-helm/templates/client-snapshot-agent-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-authmethod-clusterrole.yaml b/consul-helm/templates/connect-inject-authmethod-clusterrole.yaml new file mode 100644 index 0000000..07a295b --- /dev/null +++ b/consul-helm/templates/connect-inject-authmethod-clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-role + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: [""] + resources: + - serviceaccounts + verbs: + - get +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-authmethod-clusterrolebinding.yaml b/consul-helm/templates/connect-inject-authmethod-clusterrolebinding.yaml new file mode 100644 index 0000000..d88df03 --- /dev/null +++ b/consul-helm/templates/connect-inject-authmethod-clusterrolebinding.yaml @@ -0,0 +1,39 @@ +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-authdelegator-role-binding + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "system:auth-delegator" +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-svc-account + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-serviceaccount-role-binding + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-role +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-connect-injector-authmethod-svc-account + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-authmethod-serviceaccount.yaml b/consul-helm/templates/connect-inject-authmethod-serviceaccount.yaml new file mode 100644 index 0000000..e9c91d7 --- /dev/null +++ b/consul-helm/templates/connect-inject-authmethod-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-authmethod-svc-account + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-clusterrole.yaml b/consul-helm/templates/connect-inject-clusterrole.yaml new file mode 100644 index 0000000..158eaea --- /dev/null +++ b/consul-helm/templates/connect-inject-clusterrole.yaml @@ -0,0 +1,37 @@ +# The ClusterRole to enable the Connect injector to get, list, watch and patch MutatingWebhookConfiguration. +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-webhook + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: + - "get" + - "list" + - "watch" + - "patch" +{{- if .Values.global.enablePodSecurityPolicies }} +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-connect-injector-webhook + verbs: + - use +{{- end }} +{{- if and .Values.global.bootstrapACLs .Values.global.enableConsulNamespaces }} +- apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . }}-connect-inject-acl-token + verbs: + - get +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-clusterrolebinding.yaml b/consul-helm/templates/connect-inject-clusterrolebinding.yaml new file mode 100644 index 0000000..f22f2e7 --- /dev/null +++ b/consul-helm/templates/connect-inject-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-webhook-admin-role-binding + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-connect-injector-webhook +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-connect-injector-webhook-svc-account + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-deployment.yaml b/consul-helm/templates/connect-inject-deployment.yaml new file mode 100644 index 0000000..29d9580 --- /dev/null +++ b/consul-helm/templates/connect-inject-deployment.yaml @@ -0,0 +1,200 @@ +# The deployment for running the Connect sidecar injector +{{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled for connect injection" }}{{ end }} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for connect injection" }}{{ end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-webhook-deployment + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: connect-injector + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: connect-injector + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + {{- if not .Values.connectInject.certs.secretName }} + serviceAccountName: {{ template "consul.fullname" . 
}}-connect-injector-webhook-svc-account + {{- end }} + containers: + - name: sidecar-injector + image: "{{ default .Values.global.imageK8S .Values.connectInject.image }}" + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- /* A Consul client and ACL token is only necessary for the connect injector if namespaces are enabled */}} + {{- if .Values.global.enableConsulNamespaces }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if (and .Values.connectInject.aclInjectToken.secretName .Values.connectInject.aclInjectToken.secretKey) }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.connectInject.aclInjectToken.secretName }} + key: {{ .Values.connectInject.aclInjectToken.secretKey }} + {{- else if .Values.global.bootstrapACLs }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: "{{ template "consul.fullname" . }}-connect-inject-acl-token" + key: "token" + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="{{template "consul.fullname" . }}" + + consul-k8s inject-connect \ + -default-inject={{ .Values.connectInject.default }} \ + -consul-image="{{ default .Values.global.image .Values.connectInject.imageConsul }}" \ + {{ if .Values.connectInject.imageEnvoy -}} + -envoy-image="{{ .Values.connectInject.imageEnvoy }}" \ + {{ end -}} + -consul-k8s-image="{{ default .Values.global.imageK8S .Values.connectInject.image }}" \ + -listen=:8080 \ + {{- if .Values.connectInject.overrideAuthMethodName }} + -acl-auth-method="{{ .Values.connectInject.overrideAuthMethodName }}" \ + {{- else if .Values.global.bootstrapACLs }} + -acl-auth-method="{{ template "consul.fullname" . 
}}-k8s-auth-method" \ + {{- end }} + + {{- if .Values.global.tls.enabled }} + -consul-ca-cert=/consul/tls/ca/tls.crt \ + {{- end }} + {{- if .Values.connectInject.centralConfig.enabled }} + -enable-central-config=true \ + {{- end }} + {{- if (and .Values.connectInject.centralConfig.enabled .Values.connectInject.centralConfig.defaultProtocol) }} + -default-protocol="{{ .Values.connectInject.centralConfig.defaultProtocol }}" \ + {{- end }} + {{- range $value := .Values.connectInject.k8sAllowNamespaces }} + -allow-k8s-namespace="{{ $value }}" \ + {{- end }} + {{- range $value := .Values.connectInject.k8sDenyNamespaces }} + -deny-k8s-namespace="{{ $value }}" \ + {{- end }} + {{- if .Values.global.enableConsulNamespaces }} + -enable-namespaces=true \ + {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + -consul-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + -enable-k8s-namespace-mirroring=true \ + {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} + -k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- if .Values.global.bootstrapACLs }} + -consul-cross-namespace-acl-policy=cross-namespace-policy \ + {{- end }} + {{- end }} + {{- if .Values.connectInject.certs.secretName }} + -tls-cert-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.certName }} \ + -tls-key-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.keyName }} + {{- else }} + -tls-auto=${CONSUL_FULLNAME}-connect-injector-cfg \ + -tls-auto-hosts=${CONSUL_FULLNAME}-connect-injector-svc,${CONSUL_FULLNAME}-connect-injector-svc.${NAMESPACE},${CONSUL_FULLNAME}-connect-injector-svc.${NAMESPACE}.svc + {{- end }} + livenessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 1 + 
periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTPS + failureThreshold: 2 + initialDelaySeconds: 2 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + {{- if (or .Values.connectInject.certs.secretName .Values.global.tls.enabled) }} + volumeMounts: + {{- if .Values.connectInject.certs.secretName }} + - name: certs + mountPath: /etc/connect-injector/certs + readOnly: true + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + {{- if (or .Values.connectInject.certs.secretName .Values.global.tls.enabled) }} + volumes: + {{- if .Values.connectInject.certs.secretName }} + - name: certs + secret: + secretName: {{ .Values.connectInject.certs.secretName }} + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} + {{- if and .Values.global.bootstrapACLs .Values.global.enableConsulNamespaces }} + initContainers: + - name: injector-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template "consul.fullname" . }}-connect-inject-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="sync" + {{- end }} + {{- if .Values.connectInject.nodeSelector }} + nodeSelector: + {{ tpl .Values.connectInject.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-mutatingwebhook.yaml b/consul-helm/templates/connect-inject-mutatingwebhook.yaml new file mode 100644 index 0000000..2ca5762 --- /dev/null +++ b/consul-helm/templates/connect-inject-mutatingwebhook.yaml @@ -0,0 +1,30 @@ +# The MutatingWebhookConfiguration to enable the Connect injector. +{{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-cfg + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +webhooks: + - name: {{ template "consul.fullname" . }}-connect-injector.consul.hashicorp.com + clientConfig: + service: + name: {{ template "consul.fullname" . }}-connect-injector-svc + namespace: {{ .Release.Namespace }} + path: "/mutate" + caBundle: {{ .Values.connectInject.certs.caBundle | quote }} + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] +{{- if .Values.connectInject.namespaceSelector }} + namespaceSelector: +{{ tpl .Values.connectInject.namespaceSelector . 
| indent 6 }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/connect-inject-podsecuritypolicy.yaml b/consul-helm/templates/connect-inject-podsecuritypolicy.yaml new file mode 100644 index 0000000..aace192 --- /dev/null +++ b/consul-helm/templates/connect-inject-podsecuritypolicy.yaml @@ -0,0 +1,38 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-webhook + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/consul-helm/templates/connect-inject-service.yaml b/consul-helm/templates/connect-inject-service.yaml new file mode 100644 index 0000000..45dc2b3 --- /dev/null +++ b/consul-helm/templates/connect-inject-service.yaml @@ -0,0 +1,22 @@ +# The service for the Connect sidecar injector +{{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-svc + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - port: 443 + targetPort: 8080 + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: connect-injector +{{- end }} + diff --git a/consul-helm/templates/connect-inject-serviceaccount.yaml b/consul-helm/templates/connect-inject-serviceaccount.yaml new file mode 100644 index 0000000..2bb0919 --- /dev/null +++ b/consul-helm/templates/connect-inject-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and (not .Values.connectInject.certs.secretName) (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-connect-injector-webhook-svc-account + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . 
}} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/consul-helm/templates/dns-service.yaml b/consul-helm/templates/dns-service.yaml new file mode 100644 index 0000000..e7bca50 --- /dev/null +++ b/consul-helm/templates/dns-service.yaml @@ -0,0 +1,34 @@ +# Service for Consul DNS. +{{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-dns + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- if .Values.dns.annotations }} + annotations: + {{ tpl .Values.dns.annotations . | nindent 4 | trim }} + {{- end }} +spec: +{{- if .Values.dns.clusterIP }} + clusterIP: {{ .Values.dns.clusterIP }} +{{- end }} + ports: + - name: dns-tcp + port: 53 + protocol: "TCP" + targetPort: dns-tcp + - name: dns-udp + port: 53 + protocol: "UDP" + targetPort: dns-udp + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + hasDNS: "true" +{{- end }} diff --git a/consul-helm/templates/enterprise-license-clusterrole.yaml b/consul-helm/templates/enterprise-license-clusterrole.yaml new file mode 100644 index 0000000..e3078a4 --- /dev/null +++ b/consul-helm/templates/enterprise-license-clusterrole.yaml @@ -0,0 +1,35 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-enterprise-license + labels: + app: {{ template "consul.name" . 
}} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if or .Values.global.bootstrapACLs .Values.global.enablePodSecurityPolicies }} +rules: +{{- if .Values.global.bootstrapACLs }} + - apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . }}-enterprise-license-acl-token + verbs: + - get +{{- end }} +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-enterprise-license + verbs: + - use +{{- end }} +{{- else }} +rules: [] +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/enterprise-license-clusterrolebinding.yaml b/consul-helm/templates/enterprise-license-clusterrolebinding.yaml new file mode 100644 index 0000000..6469adc --- /dev/null +++ b/consul-helm/templates/enterprise-license-clusterrolebinding.yaml @@ -0,0 +1,21 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-enterprise-license + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-enterprise-license +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-enterprise-license + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/enterprise-license-job.yaml b/consul-helm/templates/enterprise-license-job.yaml new file mode 100644 index 0000000..ddc6eff --- /dev/null +++ b/consul-helm/templates/enterprise-license-job.yaml @@ -0,0 +1,116 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-license + labels: + app.kubernetes.io/managed-by: {{.Release.Service | quote }} + app.kubernetes.io/instance: {{.Release.Name | quote }} + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "100" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-license + labels: + app.kubernetes.io/managed-by: {{.Release.Service | quote }} + app.kubernetes.io/instance: {{.Release.Name | quote }} + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: license + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-enterprise-license + {{- if .Values.global.tls.enabled }} + volumes: + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + containers: + - name: apply-enterprise-license + image: "{{ default .Values.global.image .Values.server.image }}" + env: + - name: ENTERPRISE_LICENSE + valueFrom: + secretKeyRef: + name: {{ .Values.server.enterpriseLicense.secretName }} + key: {{ .Values.server.enterpriseLicense.secretKey }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://{{ template "consul.fullname" . }}-server:8501 + {{- else }} + value: http://{{ template "consul.fullname" . }}-server:8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end}} + {{- if .Values.global.bootstrapACLs }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: "{{ template "consul.fullname" . }}-enterprise-license-acl-token" + key: "token" + {{- end}} + command: + - "/bin/sh" + - "-c" + - | + # Create a script that we can execute with the timeout command. + cat > apply-license.sh << 'EOF' + #!/bin/sh + while true; do + echo "Applying license..." + if consul license put "${ENTERPRISE_LICENSE}"; then + echo "License applied successfully" + break + fi + echo "Retrying in 2s..." + sleep 2 + done + EOF + chmod +x ./apply-license.sh + + # Time out after 20 minutes. + timeout -t 1200 ./apply-license.sh + {{- if .Values.global.tls.enabled }} + volumeMounts: + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if .Values.global.bootstrapACLs }} + initContainers: + - name: ent-license-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template "consul.fullname" . 
}}-enterprise-license-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="sync" + {{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/enterprise-license-podsecuritypolicy.yaml b/consul-helm/templates/enterprise-license-podsecuritypolicy.yaml new file mode 100644 index 0000000..0898c20 --- /dev/null +++ b/consul-helm/templates/enterprise-license-podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} +{{- if .Values.global.enablePodSecurityPolicies }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-enterprise-license + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Allow core volume types. + volumes: + - 'secret' + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. 
+ requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/enterprise-license-serviceaccount.yaml b/consul-helm/templates/enterprise-license-serviceaccount.yaml new file mode 100644 index 0000000..ccb0f37 --- /dev/null +++ b/consul-helm/templates/enterprise-license-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-enterprise-license + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-clusterrole.yaml b/consul-helm/templates/mesh-gateway-clusterrole.yaml new file mode 100644 index 0000000..33f4dad --- /dev/null +++ b/consul-helm/templates/mesh-gateway-clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if .Values.meshGateway.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway +{{- if or .Values.global.bootstrapACLs .Values.global.enablePodSecurityPolicies }} +rules: +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-mesh-gateway + verbs: + - use +{{- end }} +{{- if .Values.global.bootstrapACLs }} + - apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . }}-mesh-gateway-acl-token + verbs: + - get +{{- end }} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-clusterrolebinding.yaml b/consul-helm/templates/mesh-gateway-clusterrolebinding.yaml new file mode 100644 index 0000000..f8150eb --- /dev/null +++ b/consul-helm/templates/mesh-gateway-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.meshGateway.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-mesh-gateway +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-mesh-gateway + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-deployment.yaml b/consul-helm/templates/mesh-gateway-deployment.yaml new file mode 100644 index 0000000..ff63e88 --- /dev/null +++ b/consul-helm/templates/mesh-gateway-deployment.yaml @@ -0,0 +1,204 @@ +{{- if .Values.meshGateway.enabled }} +{{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} +{{- /* The below test checks if clients are disabled (and if so, fails). We use the conditional from other client files and prepend 'not' */ -}} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway +spec: + replicas: {{ .Values.meshGateway.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: mesh-gateway + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: mesh-gateway + annotations: + "consul.hashicorp.com/connect-inject": "false" + {{- if .Values.meshGateway.annotations }} + {{- tpl .Values.meshGateway.annotations . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.meshGateway.affinity }} + affinity: + {{ tpl .Values.meshGateway.affinity . 
| nindent 8 | trim }} + {{- end }} + {{- if .Values.meshGateway.tolerations }} + tolerations: + {{ tpl .Values.meshGateway.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 10 + serviceAccountName: {{ template "consul.fullname" . }}-mesh-gateway + volumes: + - name: consul-bin + emptyDir: {} + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.meshGateway.hostNetwork }} + hostNetwork: {{ .Values.meshGateway.hostNetwork }} + {{- end }} + {{- if .Values.meshGateway.dnsPolicy }} + dnsPolicy: {{ .Values.meshGateway.dnsPolicy }} + {{- end }} + initContainers: + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. + - name: copy-consul-bin + image: {{ .Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.bootstrapACLs }} + # Wait for secret containing acl token to be ready. + # Doesn't do anything with it but when the main container starts we + # know that it's been created. + - name: mesh-gateway-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template "consul.fullname" . }}-mesh-gateway-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="sync" + {{- end }} + containers: + - name: mesh-gateway + image: {{ .Values.meshGateway.imageEnvoy | quote }} + {{- if .Values.meshGateway.resources }} + resources: + {{ tpl .Values.meshGateway.resources . 
| nindent 12 | trim }} + {{- end }} + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.meshGateway.wanAddress.useNodeName }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- end }} + {{- if .Values.global.bootstrapACLs }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: "{{ template "consul.fullname" . }}-mesh-gateway-acl-token" + key: "token" + {{- end}} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 + {{- end }} + command: + # /bin/sh -c is needed so we can use the pod-specific environment + # variables. 
+ - "/bin/sh" + - "-ec" + - | + exec /consul-bin/consul connect envoy \ + -mesh-gateway \ + -register \ + -address="${POD_IP}:{{ .Values.meshGateway.containerPort }}" \ + {{- if .Values.meshGateway.wanAddress.host }} + -wan-address="{{ .Values.meshGateway.wanAddress.host }}:{{ .Values.meshGateway.wanAddress.port }}" \ + {{- else if .Values.meshGateway.wanAddress.useNodeName }} + -wan-address="${NODE_NAME}:{{ .Values.meshGateway.wanAddress.port }}" \ + {{- else if .Values.meshGateway.wanAddress.useNodeIP }} + -wan-address="${HOST_IP}:{{ .Values.meshGateway.wanAddress.port }}" \ + {{- end }} + {{- if and .Values.meshGateway.consulServiceName }} + {{- if and .Values.global.bootstrapACLs (ne .Values.meshGateway.consulServiceName "mesh-gateway") }}{{ fail "if global.bootstrapACLs is true, meshGateway.consulServiceName cannot be set" }}{{ end }} + -service={{ .Values.meshGateway.consulServiceName | quote }} \ + {{- end }} + {{- if .Values.meshGateway.enableHealthChecks }} + livenessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + {{- end }} + ports: + - name: gateway + containerPort: {{ .Values.meshGateway.containerPort }} + {{- if .Values.meshGateway.hostPort }} + hostPort: {{ .Values.meshGateway.hostPort }} + {{- end }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-ec", "/consul-bin/consul services deregister -id=\"{{ default "mesh-gateway" .Values.meshGateway.consulServiceName }}\""] + + {{- if .Values.meshGateway.priorityClassName }} + priorityClassName: {{ .Values.meshGateway.priorityClassName | quote }} + {{- end }} + {{- if .Values.meshGateway.nodeSelector }} + nodeSelector: + {{ tpl .Values.meshGateway.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-podsecuritypolicy.yaml b/consul-helm/templates/mesh-gateway-podsecuritypolicy.yaml new file mode 100644 index 0000000..947d513 --- /dev/null +++ b/consul-helm/templates/mesh-gateway-podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.global.enablePodSecurityPolicies .Values.meshGateway.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-service.yaml b/consul-helm/templates/mesh-gateway-service.yaml new file mode 100644 index 0000000..7bd7ec2 --- /dev/null +++ b/consul-helm/templates/mesh-gateway-service.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.meshGateway.enabled .Values.meshGateway.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway + {{- if .Values.meshGateway.service.annotations }} + annotations: + {{ tpl .Values.meshGateway.service.annotations . | nindent 4 | trim }} + {{- end }} +spec: + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: mesh-gateway + ports: + - name: gateway + port: {{ .Values.meshGateway.service.port }} + targetPort: {{ .Values.meshGateway.containerPort }} + {{- if .Values.meshGateway.service.nodePort }} + nodePort: {{ .Values.meshGateway.service.nodePort }} + {{- end}} + type: {{ .Values.meshGateway.service.type }} + {{- if .Values.meshGateway.service.additionalSpec }} + {{ tpl .Values.meshGateway.service.additionalSpec . | nindent 2 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/mesh-gateway-serviceaccount.yaml b/consul-helm/templates/mesh-gateway-serviceaccount.yaml new file mode 100644 index 0000000..70e8d94 --- /dev/null +++ b/consul-helm/templates/mesh-gateway-serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.meshGateway.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-mesh-gateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: mesh-gateway +{{- end }} diff --git a/consul-helm/templates/server-acl-init-cleanup-clusterrole.yaml b/consul-helm/templates/server-acl-init-cleanup-clusterrole.yaml new file mode 100644 index 0000000..d70db13 --- /dev/null +++ b/consul-helm/templates/server-acl-init-cleanup-clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "delete"] +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-server-acl-init-cleanup + verbs: + - use +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-cleanup-clusterrolebinding.yaml b/consul-helm/templates/server-acl-init-cleanup-clusterrolebinding.yaml new file mode 100644 index 0000000..789b552 --- /dev/null +++ b/consul-helm/templates/server-acl-init-cleanup-clusterrolebinding.yaml @@ -0,0 +1,21 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-cleanup-job.yaml b/consul-helm/templates/server-acl-init-cleanup-job.yaml new file mode 100644 index 0000000..11617fb --- /dev/null +++ b/consul-helm/templates/server-acl-init-cleanup-job.yaml @@ -0,0 +1,56 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +{{- /* See reason for this in server-acl-init-job.yaml */ -}} +{{- if eq (int .Values.server.updatePartition) 0 }} +# This job deletes the server-acl-init job once it completes successfully. +# It runs as a helm hook because it only needs to run when the server-acl-init +# Job gets recreated which only happens during an install or upgrade. +# We also utilize the helm hook-delete-policy to delete this job itself. +# We want to delete the server-acl-init job because once it runs successfully +# it's not needed and also because if it stays around then when users run +# helm upgrade with values that change the spec of the job, Kubernetes errors +# because the job spec is immutable. If the job is deleted, then a new job +# is created and there's no error. +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "0" + # If the hook fails then all that happens is we didn't delete the job. + # There's no reason for *this* job to stay around in that case so delete + # regardless of success. + "helm.sh/hook-delete-policy": hook-succeeded,hook-failed +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: server-acl-init-cleanup + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-server-acl-init-cleanup + containers: + - name: server-acl-init-cleanup + image: {{ .Values.global.imageK8S }} + command: + - consul-k8s + args: + - delete-completed-job + - -k8s-namespace={{ .Release.Namespace }} + - {{ template "consul.fullname" . }}-server-acl-init + {{- end }} + {{- end }} + {{- end }} diff --git a/consul-helm/templates/server-acl-init-cleanup-podsecuritypolicy.yaml b/consul-helm/templates/server-acl-init-cleanup-podsecuritypolicy.yaml new file mode 100644 index 0000000..300ff20 --- /dev/null +++ b/consul-helm/templates/server-acl-init-cleanup-podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +{{- if .Values.global.enablePodSecurityPolicies }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Allow core volume types. + volumes: + - 'secret' + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + {{- end }} + {{- end }} + {{- end }} diff --git a/consul-helm/templates/server-acl-init-cleanup-serviceaccount.yaml b/consul-helm/templates/server-acl-init-cleanup-serviceaccount.yaml new file mode 100644 index 0000000..79b1d5c --- /dev/null +++ b/consul-helm/templates/server-acl-init-cleanup-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-clusterrole.yaml b/consul-helm/templates/server-acl-init-clusterrole.yaml new file mode 100644 index 0000000..15c14ba --- /dev/null +++ b/consul-helm/templates/server-acl-init-clusterrole.yaml @@ -0,0 +1,50 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - list + - apiGroups: [""] + resources: + - secrets + verbs: + - create + - get + - apiGroups: ["apps"] + resources: + - statefulsets + verbs: + - get +{{- if .Values.connectInject.enabled }} + - apiGroups: [""] + resources: + - serviceaccounts + verbs: + - get + - apiGroups: [""] + resources: + - services + verbs: + - get +{{- end }} +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . 
}}-server-acl-init + verbs: + - use +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-clusterrolebinding.yaml b/consul-helm/templates/server-acl-init-clusterrolebinding.yaml new file mode 100644 index 0000000..620a01a --- /dev/null +++ b/consul-helm/templates/server-acl-init-clusterrolebinding.yaml @@ -0,0 +1,21 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-server-acl-init +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-server-acl-init + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-job.yaml b/consul-helm/templates/server-acl-init-job.yaml new file mode 100644 index 0000000..546c7d2 --- /dev/null +++ b/consul-helm/templates/server-acl-init-job.yaml @@ -0,0 +1,130 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +{{- /* We don't render this job when server.updatePartition > 0 because that + means a server rollout is in progress and this job won't complete unless + the rollout is finished (which won't happen until the partition is 0). + If we ran it in this case, then the job would not complete which would cause + the server-acl-init-cleanup hook to run indefinitely which would cause the + helm upgrade command to hang. 
+*/ -}} +{{- if eq (int .Values.server.updatePartition) 0 }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: server-acl-init + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-server-acl-init + {{- if .Values.global.tls.enabled }} + volumes: + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + containers: + - name: post-install-job + image: {{ .Values.global.imageK8S }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.global.tls.enabled }} + volumeMounts: + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s server-acl-init \ + -server-label-selector=component=server,app={{ template "consul.name" . }},release={{ .Release.Name }} \ + -resource-prefix={{ template "consul.fullname" . 
}} \ + -k8s-namespace={{ .Release.Namespace }} \ + {{- if .Values.global.tls.enabled }} + -use-https \ + -consul-ca-cert=/consul/tls/ca/tls.crt \ + -consul-tls-server-name=server.{{ .Values.global.datacenter }}.{{ .Values.global.domain }} \ + {{- end }} + {{- if .Values.syncCatalog.enabled }} + -create-sync-token=true \ + {{- end }} + {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} + -allow-dns=true \ + {{- end }} + {{- if .Values.connectInject.enabled }} + -create-inject-auth-method=true \ + {{- end }} + {{- if .Values.meshGateway.enabled }} + -create-mesh-gateway-token=true \ + {{- end }} + {{- if .Values.connectInject.aclBindingRuleSelector }} + -acl-binding-rule-selector={{ .Values.connectInject.aclBindingRuleSelector }} \ + {{- end }} + {{- if (and .Values.server.enterpriseLicense.secretName .Values.server.enterpriseLicense.secretKey) }} + -create-enterprise-license-token=true \ + {{- end }} + {{- if .Values.client.snapshotAgent.enabled }} + -create-snapshot-agent-token=true \ + {{- end }} + {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} + -create-client-token=false \ + {{- end }} + {{- if .Values.global.enableConsulNamespaces }} + -enable-namespaces=true \ + {{- /* syncCatalog must be enabled to set sync flags */}} + {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} + {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + -consul-sync-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} + -enable-sync-k8s-namespace-mirroring=true \ + {{- if 
.Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} + -sync-k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + {{- /* connectInject must be enabled to set inject flags */}} + {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} + -create-inject-namespace-token=true \ + {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + -consul-inject-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + -enable-inject-k8s-namespace-mirroring=true \ + {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} + -inject-k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + {{- end }} + -expected-replicas={{ .Values.server.replicas }} +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-acl-init-podsecuritypolicy.yaml b/consul-helm/templates/server-acl-init-podsecuritypolicy.yaml new file mode 100644 index 0000000..d39e06f --- /dev/null +++ b/consul-helm/templates/server-acl-init-podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +{{- if .Values.global.enablePodSecurityPolicies }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Allow core volume types. 
+ volumes: + - 'secret' + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + {{- end }} + {{- end }} + {{- end }} diff --git a/consul-helm/templates/server-acl-init-serviceaccount.yaml b/consul-helm/templates/server-acl-init-serviceaccount.yaml new file mode 100644 index 0000000..7c05643 --- /dev/null +++ b/consul-helm/templates/server-acl-init-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.bootstrapACLs }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-server-acl-init + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-clusterrole.yaml b/consul-helm/templates/server-clusterrole.yaml new file mode 100644 index 0000000..e58d964 --- /dev/null +++ b/consul-helm/templates/server-clusterrole.yaml @@ -0,0 +1,22 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-server + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.global.enablePodSecurityPolicies }} +rules: +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: + - {{ template "consul.fullname" . }}-server + verbs: + - use +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/consul-helm/templates/server-clusterrolebinding.yaml b/consul-helm/templates/server-clusterrolebinding.yaml new file mode 100644 index 0000000..b81f8c3 --- /dev/null +++ b/consul-helm/templates/server-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-server + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-server +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-server + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/consul-helm/templates/server-config-configmap.yaml b/consul-helm/templates/server-config-configmap.yaml new file mode 100644 index 0000000..e7c3f05 --- /dev/null +++ b/consul-helm/templates/server-config-configmap.yaml @@ -0,0 +1,68 @@ +# StatefulSet to run the actual Consul server cluster. +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "consul.fullname" . }}-server-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + extra-from-values.json: |- +{{ tpl .Values.server.extraConfig . | trimAll "\"" | indent 4 }} + {{- if .Values.global.bootstrapACLs }} + acl-config.json: |- + { + "acl": { + "enabled": true, + "default_policy": "deny", + "down_policy": "extend-cache", + "enable_token_persistence": true + } + } + {{- end }} + {{- if and .Values.connectInject.enabled .Values.connectInject.centralConfig.enabled }} + central-config.json: |- + { + "enable_central_service_config": true + } + {{- if gt (len .Values.connectInject.centralConfig.proxyDefaults) 3 }} + proxy-defaults-config.json: |- + { + "config_entries": { + "bootstrap": [ + { + "kind": "proxy-defaults", + "name": "global", + {{- if and .Values.meshGateway.enabled .Values.meshGateway.globalMode }} + "mesh_gateway": { + "mode": {{ .Values.meshGateway.globalMode | quote }} + }, + {{- end }} + "config": +{{ tpl .Values.connectInject.centralConfig.proxyDefaults . | trimAll "\"" | indent 14 }} + } + ] + } + } + {{- else if and .Values.meshGateway.enabled .Values.meshGateway.globalMode }} + proxy-defaults-config.json: |- + { + "config_entries": { + "bootstrap": [ + { + "kind": "proxy-defaults", + "name": "global", + "mesh_gateway": { + "mode": {{ .Values.meshGateway.globalMode | quote }} + } + } + ] + } + } + {{- end }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/server-disruptionbudget.yaml b/consul-helm/templates/server-disruptionbudget.yaml new file mode 100644 index 0000000..01bef39 --- /dev/null +++ b/consul-helm/templates/server-disruptionbudget.yaml @@ -0,0 +1,21 @@ +# PodDisruptionBudget to prevent degrading the server cluster through +# voluntary cluster changes. 
+{{- if (and .Values.server.disruptionBudget.enabled (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "consul.fullname" . }}-server + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + maxUnavailable: {{ template "consul.pdb.maxUnavailable" . }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server +{{- end }} diff --git a/consul-helm/templates/server-podsecuritypolicy.yaml b/consul-helm/templates/server-podsecuritypolicy.yaml new file mode 100644 index 0000000..4687fda --- /dev/null +++ b/consul-helm/templates/server-podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-server + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. 
+ rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/consul-helm/templates/server-service.yaml b/consul-helm/templates/server-service.yaml new file mode 100644 index 0000000..4abaf8a --- /dev/null +++ b/consul-helm/templates/server-service.yaml @@ -0,0 +1,69 @@ +# Headless service for Consul server DNS entries. This service should only +# point to Consul servers. For access to an agent, one should assume that +# the agent is installed locally on the node and the NODE_IP should be used. +# If the node can't run a Consul agent, then this service can be used to +# communicate directly to a server agent. +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-server + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + # This must be set in addition to publishNotReadyAddresses due + # to an open issue where it may not work: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + # We want the servers to become available even if they're not ready + # since this DNS is also used for join operations. 
+ publishNotReadyAddresses: true + ports: + {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }} + - name: http + port: 8500 + targetPort: 8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: https + port: 8501 + targetPort: 8501 + {{- end }} + - name: serflan-tcp + protocol: "TCP" + port: 8301 + targetPort: 8301 + - name: serflan-udp + protocol: "UDP" + port: 8301 + targetPort: 8301 + - name: serfwan-tcp + protocol: "TCP" + port: 8302 + targetPort: 8302 + - name: serfwan-udp + protocol: "UDP" + port: 8302 + targetPort: 8302 + - name: server + port: 8300 + targetPort: 8300 + - name: dns-tcp + protocol: "TCP" + port: 8600 + targetPort: dns-tcp + - name: dns-udp + protocol: "UDP" + port: 8600 + targetPort: dns-udp + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server +{{- end }} diff --git a/consul-helm/templates/server-serviceaccount.yaml b/consul-helm/templates/server-serviceaccount.yaml new file mode 100644 index 0000000..0b35ac4 --- /dev/null +++ b/consul-helm/templates/server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-server + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/consul-helm/templates/server-statefulset.yaml b/consul-helm/templates/server-statefulset.yaml new file mode 100644 index 0000000..f51b49b --- /dev/null +++ b/consul-helm/templates/server-statefulset.yaml @@ -0,0 +1,248 @@ +# StatefulSet to run the actual Consul server cluster. 
+{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "consul.fullname" . }}-server + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: server +spec: + serviceName: {{ template "consul.fullname" . }}-server + podManagementPolicy: Parallel + replicas: {{ .Values.server.replicas }} + {{- if (gt (int .Values.server.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.server.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: server + hasDNS: "true" + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: server + hasDNS: "true" + annotations: + "consul.hashicorp.com/connect-inject": "false" + {{- if .Values.server.annotations }} + {{- tpl .Values.server.annotations . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.server.affinity }} + affinity: + {{ tpl .Values.server.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: + {{ tpl .Values.server.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 30 + serviceAccountName: {{ template "consul.fullname" . }}-server + securityContext: + fsGroup: 1000 + volumes: + - name: config + configMap: + name: {{ template "consul.fullname" . 
}}-server-config + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + - name: tls-server-cert + secret: + secretName: {{ template "consul.fullname" . }}-server-cert + {{- end }} + {{- range .Values.server.extraVolumes }} + - name: userconfig-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- end }} + {{- end }} + {{- if .Values.server.priorityClassName }} + priorityClassName: {{ .Values.server.priorityClassName | quote }} + {{- end }} + containers: + - name: consul + image: "{{ default .Values.global.image .Values.server.image }}" + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} + - name: GOSSIP_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.global.gossipEncryption.secretName }} + key: {{ .Values.global.gossipEncryption.secretKey }} + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://localhost:8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + {{- include "consul.extraEnvironmentVars" .Values.server | nindent 12 }} + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="{{template "consul.fullname" . 
}}" + + exec /bin/consul agent \ + -advertise="${POD_IP}" \ + -bind=0.0.0.0 \ + -bootstrap-expect={{ .Values.server.bootstrapExpect }} \ + {{- if .Values.global.tls.enabled }} + -hcl='ca_file = "/consul/tls/ca/tls.crt"' \ + -hcl='cert_file = "/consul/tls/server/tls.crt"' \ + -hcl='key_file = "/consul/tls/server/tls.key"' \ + {{- if .Values.global.tls.verify }} + -hcl='verify_incoming_rpc = true' \ + -hcl='verify_outgoing = true' \ + -hcl='verify_server_hostname = true' \ + {{- end }} + -hcl='ports { https = 8501 }' \ + {{- if .Values.global.tls.httpsOnly }} + -hcl='ports { http = -1 }' \ + {{- end }} + {{- end }} + -client=0.0.0.0 \ + -config-dir=/consul/config \ + {{- range .Values.server.extraVolumes }} + {{- if .load }} + -config-dir=/consul/userconfig/{{ .name }} \ + {{- end }} + {{- end }} + -datacenter={{ .Values.global.datacenter }} \ + -data-dir=/consul/data \ + -domain={{ .Values.global.domain }} \ + {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} + -encrypt="${GOSSIP_KEY}" \ + {{- end }} + {{- if .Values.server.connect }} + -hcl="connect { enabled = true }" \ + {{- end }} + {{- if .Values.ui.enabled }} + -ui \ + {{- end }} + {{- range $index := until (.Values.server.replicas | int) }} + -retry-join=${CONSUL_FULLNAME}-server-{{ $index }}.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc \ + {{- end }} + -server + volumeMounts: + - name: data-{{ .Release.Namespace }} + mountPath: /consul/data + - name: config + mountPath: /consul/config + {{- if .Values.global.tls.enabled }} + - name: consul-ca-cert + mountPath: /consul/tls/ca/ + readOnly: true + - name: tls-server-cert + mountPath: /consul/tls/server + readOnly: true + {{- end }} + {{- range .Values.server.extraVolumes }} + - name: userconfig-{{ .name }} + readOnly: true + mountPath: /consul/userconfig/{{ .name }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - consul leave + ports: + {{- if (or (not .Values.global.tls.enabled) 
(not .Values.global.tls.httpsOnly)) }} + - containerPort: 8500 + name: http + {{- end }} + {{- if .Values.global.tls.enabled }} + - containerPort: 8501 + name: https + {{- end }} + - containerPort: 8301 + name: serflan + - containerPort: 8302 + name: serfwan + - containerPort: 8300 + name: server + - containerPort: 8600 + name: dns-tcp + protocol: "TCP" + - containerPort: 8600 + name: dns-udp + protocol: "UDP" + readinessProbe: + # NOTE(mitchellh): when our HTTP status endpoints support the + # proper status codes, we should switch to that. This is temporary. + exec: + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.tls.enabled }} + curl \ + --cacert /consul/tls/ca/tls.crt \ + https://127.0.0.1:8501/v1/status/leader \ + {{- else }} + curl http://127.0.0.1:8500/v1/status/leader \ + {{- end }} + 2>/dev/null | grep -E '".+"' + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 5 + {{- if .Values.server.resources }} + resources: + {{ tpl .Values.server.resources . | nindent 12 | trim }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: + {{ tpl .Values.server.nodeSelector . 
| indent 8 | trim }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data-{{ .Release.Namespace }} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.server.storage }} + {{- if .Values.server.storageClass }} + storageClassName: {{ .Values.server.storageClass }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/sync-catalog-clusterrole.yaml b/consul-helm/templates/sync-catalog-clusterrole.yaml new file mode 100644 index 0000000..fe0ed62 --- /dev/null +++ b/consul-helm/templates/sync-catalog-clusterrole.yaml @@ -0,0 +1,49 @@ +{{- $syncEnabled := (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- if $syncEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: + - apiGroups: [""] + resources: + - services + - endpoints + verbs: + - get + - list + - watch +{{- if .Values.syncCatalog.toK8S }} + - update + - patch + - delete + - create +{{- end }} + - apiGroups: [""] + resources: + - nodes + verbs: + - get +{{- if .Values.global.bootstrapACLs }} + - apiGroups: [""] + resources: + - secrets + resourceNames: + - {{ template "consul.fullname" . }}-catalog-sync-acl-token + verbs: + - get +{{- end }} +{{- if .Values.global.enablePodSecurityPolicies }} + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: + - use + resourceNames: + - {{ template "consul.fullname" . 
}}-sync-catalog +{{- end }} +{{- end }} diff --git a/consul-helm/templates/sync-catalog-clusterrolebinding.yaml b/consul-helm/templates/sync-catalog-clusterrolebinding.yaml new file mode 100644 index 0000000..648bd30 --- /dev/null +++ b/consul-helm/templates/sync-catalog-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- $syncEnabled := (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- if $syncEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-sync-catalog +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-sync-catalog + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/consul-helm/templates/sync-catalog-deployment.yaml b/consul-helm/templates/sync-catalog-deployment.yaml new file mode 100644 index 0000000..23cb7f1 --- /dev/null +++ b/consul-helm/templates/sync-catalog-deployment.yaml @@ -0,0 +1,185 @@ +# The deployment for running the sync-catalog pod +{{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + release: {{ .Release.Name }} + component: sync-catalog + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: sync-catalog + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog + {{- if .Values.global.tls.enabled }} + volumes: + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + containers: + - name: consul-sync-catalog + image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.syncCatalog.aclSyncToken.secretName .Values.syncCatalog.aclSyncToken.secretKey) }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.syncCatalog.aclSyncToken.secretName }} + key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} + {{- end }} + {{- if .Values.global.bootstrapACLs }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: "{{ template "consul.fullname" . 
}}-catalog-sync-acl-token" + key: "token" + {{- end}} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + volumeMounts: + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s sync-catalog \ + -k8s-default-sync={{ .Values.syncCatalog.default }} \ + {{- if (not .Values.syncCatalog.toConsul) }} + -to-consul=false \ + {{- end }} + {{- if (not .Values.syncCatalog.toK8S) }} + -to-k8s=false \ + {{- end }} + -consul-domain={{ .Values.global.domain }} \ + {{- if .Values.syncCatalog.k8sPrefix }} + -k8s-service-prefix="{{ .Values.syncCatalog.k8sPrefix}}" \ + {{- end }} + {{- if .Values.syncCatalog.k8sSourceNamespace }} + -k8s-source-namespace="{{ .Values.syncCatalog.k8sSourceNamespace}}" \ + {{- end }} + {{- range $value := .Values.syncCatalog.k8sAllowNamespaces }} + -allow-k8s-namespace="{{ $value }}" \ + {{- end }} + {{- range $value := .Values.syncCatalog.k8sDenyNamespaces }} + -deny-k8s-namespace="{{ $value }}" \ + {{- end }} + -k8s-write-namespace=${NAMESPACE} \ + {{- if (not .Values.syncCatalog.syncClusterIPServices) }} + -sync-clusterip-services=false \ + {{- end }} + {{- if .Values.syncCatalog.nodePortSyncType }} + -node-port-sync-type={{ .Values.syncCatalog.nodePortSyncType }} \ + {{- end }} + {{- if .Values.syncCatalog.consulWriteInterval }} + -consul-write-interval={{ .Values.syncCatalog.consulWriteInterval }} \ + {{- end }} + {{- if .Values.syncCatalog.logLevel }} + -log-level={{ .Values.syncCatalog.logLevel }} \ + {{- end }} + {{- if .Values.syncCatalog.k8sTag }} + -consul-k8s-tag={{ .Values.syncCatalog.k8sTag }} \ + {{- end }} + {{- if .Values.syncCatalog.consulPrefix}} + -consul-service-prefix="{{ .Values.syncCatalog.consulPrefix}}" \ + {{- end}} + {{- if 
.Values.syncCatalog.addK8SNamespaceSuffix}} + -add-k8s-namespace-suffix \ + {{- end}} + {{- if .Values.global.enableConsulNamespaces }} + -enable-namespaces=true \ + {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + -consul-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} + -enable-k8s-namespace-mirroring=true \ + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} + -k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- if .Values.global.bootstrapACLs }} + -consul-cross-namespace-acl-policy=cross-namespace-policy \ + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTP + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + {{- if .Values.global.bootstrapACLs }} + initContainers: + - name: sync-acl-init + image: {{ .Values.global.imageK8S }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s acl-init \ + -secret-name="{{ template "consul.fullname" . }}-catalog-sync-acl-token" \ + -k8s-namespace={{ .Release.Namespace }} \ + -init-type="sync" + {{- end }} + {{- if .Values.syncCatalog.nodeSelector }} + nodeSelector: + {{ tpl .Values.syncCatalog.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/templates/sync-catalog-podsecuritypolicy.yaml b/consul-helm/templates/sync-catalog-podsecuritypolicy.yaml new file mode 100644 index 0000000..946a3a8 --- /dev/null +++ b/consul-helm/templates/sync-catalog-podsecuritypolicy.yaml @@ -0,0 +1,38 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/consul-helm/templates/sync-catalog-serviceaccount.yaml b/consul-helm/templates/sync-catalog-serviceaccount.yaml new file mode 100644 index 0000000..0c4291e --- /dev/null +++ b/consul-helm/templates/sync-catalog-serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- $syncEnabled := (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- if $syncEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . 
}}-sync-catalog + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} diff --git a/consul-helm/templates/tests/test-runner.yaml b/consul-helm/templates/tests/test-runner.yaml new file mode 100644 index 0000000..8f3388e --- /dev/null +++ b/consul-helm/templates/tests/test-runner.yaml @@ -0,0 +1,61 @@ +{{- if .Values.tests.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ template "consul.fullname" . }}-test" + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": test-success +spec: + {{- if .Values.global.tls.enabled }} + volumes: + - name: tls-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + containers: + - name: consul-test + image: "{{ .Values.global.image }}" + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + volumeMounts: + - name: tls-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + export VALUE="{{ .Release.Name }}" + consul kv delete _consul_helm_test + consul kv put _consul_helm_test $VALUE + [ `consul kv get _consul_helm_test` = "$VALUE" ] + consul kv delete _consul_helm_test + restartPolicy: Never +{{- end }} diff --git a/consul-helm/templates/tls-init-cleanup-clusterrole.yaml b/consul-helm/templates/tls-init-cleanup-clusterrole.yaml new file mode 100644 index 0000000..02cde16 --- /dev/null +++ b/consul-helm/templates/tls-init-cleanup-clusterrole.yaml @@ -0,0 +1,35 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: [""] + resources: + - secrets + resourceNames: + {{- if (not (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName)) }} + - {{ template "consul.fullname" . }}-ca-cert + - {{ template "consul.fullname" . 
}}-ca-key + {{- end }} + - {{ template "consul.fullname" . }}-server-cert + verbs: + - delete +{{- if .Values.global.enablePodSecurityPolicies }} +- apiGroups: ["policy"] + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "consul.fullname" . }}-tls-init-cleanup +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-cleanup-clusterrolebinding.yaml b/consul-helm/templates/tls-init-cleanup-clusterrolebinding.yaml new file mode 100644 index 0000000..b97efe4 --- /dev/null +++ b/consul-helm/templates/tls-init-cleanup-clusterrolebinding.yaml @@ -0,0 +1,22 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-tls-init-cleanup +subjects: +- kind: ServiceAccount + name: {{ template "consul.fullname" . 
}}-tls-init-cleanup + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-cleanup-job.yaml b/consul-helm/templates/tls-init-cleanup-job.yaml new file mode 100644 index 0000000..39cc531 --- /dev/null +++ b/consul-helm/templates/tls-init-cleanup-job.yaml @@ -0,0 +1,54 @@ +# tls-init-cleanup job deletes Kubernetes secrets created by tls-init +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: tls-init-cleanup + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-tls-init-cleanup + containers: + - name: tls-init-cleanup + image: "{{ .Values.global.image }}" + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: + - "/bin/sh" + - "-ec" + - | + {{- if (not (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName)) }} + curl -s -X DELETE --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets/{{ template "consul.fullname" . 
}}-ca-cert \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" + curl -s -X DELETE --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets/{{ template "consul.fullname" . }}-ca-key \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" + {{- end }} + curl -s -X DELETE --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets/{{ template "consul.fullname" . }}-server-cert \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-cleanup-podsecuritypolicy.yaml b/consul-helm/templates/tls-init-cleanup-podsecuritypolicy.yaml new file mode 100644 index 0000000..d58e2bd --- /dev/null +++ b/consul-helm/templates/tls-init-cleanup-podsecuritypolicy.yaml @@ -0,0 +1,36 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and .Values.global.tls.enabled .Values.global.enablePodSecurityPolicies) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-cleanup-serviceaccount.yaml b/consul-helm/templates/tls-init-cleanup-serviceaccount.yaml new file mode 100644 index 0000000..b62a005 --- /dev/null +++ b/consul-helm/templates/tls-init-cleanup-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-tls-init-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-clusterrole.yaml b/consul-helm/templates/tls-init-clusterrole.yaml new file mode 100644 index 0000000..dfd75eb --- /dev/null +++ b/consul-helm/templates/tls-init-clusterrole.yaml @@ -0,0 +1,32 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "consul.fullname" . }}-tls-init + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - create +{{- if .Values.global.enablePodSecurityPolicies }} +- apiGroups: ["policy"] + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "consul.fullname" . }}-tls-init +{{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-clusterrolebinding.yaml b/consul-helm/templates/tls-init-clusterrolebinding.yaml new file mode 100644 index 0000000..7ce6801 --- /dev/null +++ b/consul-helm/templates/tls-init-clusterrolebinding.yaml @@ -0,0 +1,25 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ template "consul.fullname" . }}-tls-init + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "consul.fullname" . }}-tls-init +subjects: +- kind: ServiceAccount + name: {{ template "consul.fullname" . }}-tls-init + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-job.yaml b/consul-helm/templates/tls-init-job.yaml new file mode 100644 index 0000000..3700a42 --- /dev/null +++ b/consul-helm/templates/tls-init-job.yaml @@ -0,0 +1,113 @@ +# tls-init job generate Consul cluster CA and certificates for the Consul servers +# and creates Kubernetes secrets for them. 
+{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-tls-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-tls-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: tls-init + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-tls-init + {{- if (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName) }} + volumes: + - name: consul-ca-cert + secret: + secretName: {{ .Values.global.tls.caCert.secretName }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + - name: consul-ca-key + secret: + secretName: {{ .Values.global.tls.caKey.secretName }} + items: + - key: {{ default "tls.key" .Values.global.tls.caKey.secretKey }} + path: tls.key + {{- end }} + containers: + - name: tls-init + image: "{{ .Values.global.image }}" + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # We're using POST requests below to create secrets via Kubernetes API. + # Note that in the subsequent runs of the job, POST requests will + # return a 409 because these secrets would already exist; + # we are ignoring these response codes. 
+ command: + - "/bin/sh" + - "-ec" + - | + {{- if (not (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName)) }} + consul tls ca create \ + -domain={{ .Values.global.domain }} + curl -s -X POST --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d "{ \"kind\": \"Secret\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"{{ template "consul.fullname" . }}-ca-cert\", \"namespace\": \"${NAMESPACE}\" }, \"type\": \"Opaque\", \"data\": { \"tls.crt\": \"$( cat {{ .Values.global.domain }}-agent-ca.pem | base64 | tr -d '\n' )\" }}" > /dev/null + curl -s -X POST --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d "{ \"kind\": \"Secret\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"{{ template "consul.fullname" . }}-ca-key\", \"namespace\": \"${NAMESPACE}\" }, \"type\": \"Opaque\", \"data\": { \"tls.key\": \"$( cat {{ .Values.global.domain }}-agent-ca-key.pem | base64 | tr -d '\n' )\" }}" > /dev/null + {{- end }} + consul tls cert create -server \ + -days=730 \ + {{- if (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName) }} + -ca=/consul/tls/ca/cert/tls.crt \ + -key=/consul/tls/ca/key/tls.key \ + {{- end }} + -additional-dnsname='{{ template "consul.fullname" . }}-server' \ + -additional-dnsname='*.{{ template "consul.fullname" . }}-server' \ + -additional-dnsname='*.{{ template "consul.fullname" . 
}}-server.{{ .Release.Namespace }}' \ + -additional-dnsname='*.{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc' \ + {{- range .Values.global.tls.serverAdditionalIPSANs }} + -additional-ipaddress={{ . }} \ + {{- end }} + {{- range .Values.global.tls.serverAdditionalDNSSANs }} + -additional-dnsname={{ . }} \ + {{- end }} + -dc={{ .Values.global.datacenter }} \ + -domain={{ .Values.global.domain }} + curl -s -X POST --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/secrets \ + -H "Authorization: Bearer $( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d "{ \"kind\": \"Secret\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"{{ template "consul.fullname" . }}-server-cert\", \"namespace\": \"${NAMESPACE}\" }, \"type\": \"kubernetes.io/tls\", \"data\": { \"tls.crt\": \"$( cat {{ .Values.global.datacenter }}-server-{{ .Values.global.domain }}-0.pem | base64 | tr -d '\n' )\", \"tls.key\": \"$( cat {{ .Values.global.datacenter }}-server-{{ .Values.global.domain }}-0-key.pem | base64 | tr -d '\n' )\" } }" > /dev/null + {{- if (and .Values.global.tls.caCert.secretName .Values.global.tls.caKey.secretName) }} + volumeMounts: + - name: consul-ca-cert + mountPath: /consul/tls/ca/cert + readOnly: true + - name: consul-ca-key + mountPath: /consul/tls/ca/key + readOnly: true + {{- end }} +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-podsecuritypolicy.yaml b/consul-helm/templates/tls-init-podsecuritypolicy.yaml new file mode 100644 index 0000000..30854bf --- /dev/null +++ b/consul-helm/templates/tls-init-podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if (and 
.Values.global.tls.enabled .Values.global.enablePodSecurityPolicies) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-tls-init + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/consul-helm/templates/tls-init-serviceaccount.yaml b/consul-helm/templates/tls-init-serviceaccount.yaml new file mode 100644 index 0000000..e59e44e --- /dev/null +++ b/consul-helm/templates/tls-init-serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.global.tls.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-tls-init + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation +{{- end }} +{{- end }} diff --git a/consul-helm/templates/ui-service.yaml b/consul-helm/templates/ui-service.yaml new file mode 100644 index 0000000..6f12271 --- /dev/null +++ b/consul-helm/templates/ui-service.yaml @@ -0,0 +1,39 @@ +# UI Service for Consul Server +{{- if (and (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) (or (and (ne (.Values.ui.enabled | toString) "-") .Values.ui.enabled) (and (eq (.Values.ui.enabled | toString) "-") .Values.global.enabled)) (or (and (ne (.Values.ui.service.enabled | toString) "-") .Values.ui.service.enabled) (and (eq (.Values.ui.service.enabled | toString) "-") .Values.global.enabled))) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-ui + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- if .Values.ui.service.annotations }} + annotations: + {{ tpl .Values.ui.service.annotations . | nindent 4 | trim }} + {{- end }} +spec: + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server + ports: + {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }} + - name: http + port: 80 + targetPort: 8500 + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: https + port: 443 + targetPort: 8501 + {{- end }} + {{- if .Values.ui.service.type }} + type: {{ .Values.ui.service.type }} + {{- end }} + {{- if .Values.ui.service.additionalSpec }} + {{ tpl .Values.ui.service.additionalSpec . 
| nindent 2 | trim }} + {{- end }} +{{- end }} diff --git a/consul-helm/test/acceptance/_helpers.bash b/consul-helm/test/acceptance/_helpers.bash new file mode 100644 index 0000000..a204f6b --- /dev/null +++ b/consul-helm/test/acceptance/_helpers.bash @@ -0,0 +1,56 @@ +# name_prefix returns the prefix of the resources within Kubernetes. +name_prefix() { + printf "consul" +} + +# helm_install installs the Consul chart. This will source overridable +# values from the "values.yaml" file in this directory. This can be set +# by CI or other environments to do test-specific overrides. Note that its +# easily possible to break tests this way so be careful. +helm_install() { + local values="${BATS_TEST_DIRNAME}/values.yaml" + if [ ! -f "${values}" ]; then + touch $values + fi + + helm install -f ${values} \ + --name consul \ + --wait \ + ${BATS_TEST_DIRNAME}/../.. +} + +# helm_delete deletes the Consul chart and all resources. +helm_delete() { + helm delete --purge consul + kubectl delete --all pvc +} + +# wait for a pod to be ready +wait_for_ready() { + POD_NAME=$1 + + check() { + # This requests the pod and checks whether the status is running + # and the ready state is true. If so, it outputs the name. Otherwise + # it outputs empty. Therefore, to check for success, check for nonzero + # string length. + kubectl get pods $1 -o json | \ + jq -r 'select( + .status.phase == "Running" and + ([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1 + ) | .metadata.namespace + "/" + .metadata.name' + } + + for i in $(seq 30); do + if [ -n "$(check ${POD_NAME})" ]; then + echo "${POD_NAME} is ready." + return + fi + + echo "Waiting for ${POD_NAME} to be ready..." + sleep 2 + done + + echo "${POD_NAME} never became ready." 
+ exit 1 +} diff --git a/consul-helm/test/acceptance/server.bats b/consul-helm/test/acceptance/server.bats new file mode 100644 index 0000000..371756d --- /dev/null +++ b/consul-helm/test/acceptance/server.bats @@ -0,0 +1,19 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server: default, comes up healthy" { + helm_install + wait_for_ready $(name_prefix)-consul-server-0 + + # Verify there are three servers + local server_count=$(kubectl exec "$(name_prefix)-consul-server-0" consul members | + grep server | + wc -l) + [ "${server_count}" -eq "3" ] + + helm test consul + + # Clean up + helm_delete +} diff --git a/consul-helm/test/docker/Test.dockerfile b/consul-helm/test/docker/Test.dockerfile new file mode 100644 index 0000000..7e3ec3b --- /dev/null +++ b/consul-helm/test/docker/Test.dockerfile @@ -0,0 +1,51 @@ +# This Dockerfile installs all the dependencies necessary to run the unit and +# acceptance tests. This image also contains gcloud so you can run tests +# against a GKE cluster easily. +# +# This image has no automatic entrypoint. It is expected that you'll run +# a script to configure kubectl, potentially install Helm, and run the tests +# manually. This image only has the dependencies pre-installed. 
+ +FROM alpine:latest +WORKDIR /root + +ENV BATS_VERSION "1.1.0" +ENV TERRAFORM_VERSION "0.12.10" + +# base packages +RUN apk update && apk add --no-cache --virtual .build-deps \ + ca-certificates \ + curl \ + tar \ + bash \ + openssl \ + python \ + py-pip \ + git \ + jq + +# yq +RUN pip install yq + +# gcloud +RUN curl -OL https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash && \ + bash install_google_cloud_sdk.bash --disable-prompts --install-dir='/root/' && \ + ln -s /root/google-cloud-sdk/bin/gcloud /usr/local/bin/gcloud + +# terraform +RUN curl -sSL https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip -o /tmp/tf.zip \ + && unzip /tmp/tf.zip \ + && ln -s /root/terraform /usr/local/bin/terraform + +# kubectl +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ + chmod +x ./kubectl && \ + mv ./kubectl /usr/local/bin/kubectl + +# helm +RUN curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash + +# bats +RUN curl -sSL https://github.com/bats-core/bats-core/archive/v${BATS_VERSION}.tar.gz -o /tmp/bats.tgz \ + && tar -zxf /tmp/bats.tgz -C /tmp \ + && /bin/bash /tmp/bats-core-${BATS_VERSION}/install.sh /usr/local diff --git a/consul-helm/test/terraform/main.tf b/consul-helm/test/terraform/main.tf new file mode 100644 index 0000000..68fd74f --- /dev/null +++ b/consul-helm/test/terraform/main.tf @@ -0,0 +1,71 @@ +locals { + service_account_path = "${path.module}/service-account.yaml" +} + +provider "google" { + project = var.project +} + +resource "random_id" "suffix" { + byte_length = 4 +} + +data "google_container_engine_versions" "main" { + location = var.zone +} + +resource "google_container_cluster" "cluster" { + name = "consul-k8s-${random_id.suffix.dec}" + project = var.project + enable_legacy_abac = true + initial_node_count = 3 
+ location = var.zone + min_master_version = data.google_container_engine_versions.main.latest_master_version + node_version = data.google_container_engine_versions.main.latest_node_version +} + +resource "null_resource" "kubectl" { + count = var.init_cli ? 1 : 0 + + triggers = { + cluster = google_container_cluster.cluster.id + } + + # On creation, we want to setup the kubectl credentials. The easiest way + # to do this is to shell out to gcloud. + provisioner "local-exec" { + command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}" + } + + # On destroy we want to try to clean up the kubectl credentials. This + # might fail if the credentials are already cleaned up or something so we + # want this to continue on failure. Generally, this works just fine since + # it only operates on local data. + provisioner "local-exec" { + when = "destroy" + on_failure = "continue" + command = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster" + } + + provisioner "local-exec" { + when = "destroy" + on_failure = "continue" + command = "kubectl config get-contexts | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-context" + } +} + +resource "null_resource" "helm" { + count = var.init_cli ? 1 : 0 + depends_on = ["null_resource.kubectl"] + + triggers = { + cluster = google_container_cluster.cluster.id + } + + provisioner "local-exec" { + command = < 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ClusterRole: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'global.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ClusterRole: can be enabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ClusterRole: disabled with client.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ClusterRole: enabled with client.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +# The rules key must always be set (#178). +@test "client/ClusterRole: rules empty with client.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq '.rules' | tee /dev/stderr) + [ "${actual}" = "[]" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "client/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "client/ClusterRole: allows secret access with global.bootsrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} + +@test "client/ClusterRole: allows secret access with global.bootsrapACLs=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrole.yaml \ + --set 'client.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[1].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} diff --git a/consul-helm/test/unit/client-clusterrolebinding.bats b/consul-helm/test/unit/client-clusterrolebinding.bats new file mode 100644 index 0000000..31bd33f --- /dev/null +++ b/consul-helm/test/unit/client-clusterrolebinding.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/ClusterRoleBinding: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ClusterRoleBinding: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ClusterRoleBinding: disabled with client disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrolebinding.yaml \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ClusterRoleBinding: enabled with client enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrolebinding.yaml \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ClusterRoleBinding: enabled with client enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/consul-helm/test/unit/client-configmap.bats b/consul-helm/test/unit/client-configmap.bats new file mode 100755 index 0000000..0e0fbc3 --- /dev/null +++ b/consul-helm/test/unit/client-configmap.bats @@ -0,0 +1,77 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/ConfigMap: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ConfigMap: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ConfigMap: disable with client.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ConfigMap: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ConfigMap: extraConfig is set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'client.extraConfig="{\"hello\": \"world\"}"' \ + . | tee /dev/stderr | + yq '.data["extra-from-values.json"] | match("world") | length' | tee /dev/stderr) + [ ! -z "${actual}" ] +} + +#-------------------------------------------------------------------- +# connectInject.centralConfig + +@test "client/ConfigMap: centralConfig is enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.data["central-config.json"] | contains("enable_central_service_config")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ConfigMap: centralConfig can be disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=false' \ + . 
| tee /dev/stderr | + yq '.data["central-config.json"] | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} diff --git a/consul-helm/test/unit/client-daemonset.bats b/consul-helm/test/unit/client-daemonset.bats new file mode 100755 index 0000000..6f13936 --- /dev/null +++ b/consul-helm/test/unit/client-daemonset.bats @@ -0,0 +1,855 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/DaemonSet: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: disable with client.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: image defaults to global.image" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.image=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +@test "client/DaemonSet: image can be overridden with client.image" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.image=foo' \ + --set 'client.image=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "client/DaemonSet: no updateStrategy when not updating" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq -r '.spec.updateStrategy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +#-------------------------------------------------------------------- +# retry-join + +@test "client/DaemonSet: retry join gets populated" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'server.replicas=3' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | any(contains("-retry-join"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + + +#-------------------------------------------------------------------- +# grpc + +@test "client/DaemonSet: grpc is enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: grpc can be disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.grpc=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "client/DaemonSet: no resources defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/DaemonSet: resources can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.resources=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +#-------------------------------------------------------------------- +# extraVolumes + +@test "client/DaemonSet: adds extra volume" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.configMap.name' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + local actual=$(echo $object | + yq -r '.configMap.secretName' | tee /dev/stderr) + [ "${actual}" = "null" ] + + # Test that it mounts it + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/userconfig/foo" ] + + # Doesn't load it + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(test("userconfig"))) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "client/DaemonSet: adds extra secret volume" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=secret' \ + --set 'client.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.secret.name' | tee /dev/stderr) + [ "${actual}" = "null" ] + + local actual=$(echo $object | + yq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + # Test that it mounts it + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/userconfig/foo" ] + + # Doesn't load it + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(test("userconfig"))) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "client/DaemonSet: adds loadable volume" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraVolumes[0].type=configMap' \ + --set 'client.extraVolumes[0].name=foo' \ + --set 'client.extraVolumes[0].load=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(contains("/consul/userconfig/foo"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "client/DaemonSet: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/DaemonSet: specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "client/DaemonSet: affinity not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec | .affinity? == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: specified affinity" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.affinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "client/DaemonSet: priorityClassName is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.priorityClassName' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/DaemonSet: specified priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.priorityClassName=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "client/DaemonSet: no annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "client/DaemonSet: annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.annotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "client/DaemonSet: tolerations not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: tolerations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.tolerations=foobar' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# gossip encryption + +@test "client/DaemonSet: gossip encryption disabled in client DaemonSet by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/DaemonSet: gossip encryption disabled in client DaemonSet when clients are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.enabled=false' \ + --set 'global.gossipEncryption.secretName=foo' \ + --set 'global.gossipEncryption.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: gossip encryption disabled in client DaemonSet when secretName is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.gossipEncryption.secretKey=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/DaemonSet: gossip encryption disabled in client DaemonSet when secretKey is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.gossipEncryption.secretName=foo' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/DaemonSet: gossip environment variable present in client DaemonSet when all config is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.gossipEncryption.secretKey=foo' \ + --set 'global.gossipEncryption.secretName=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: encrypt CLI option not present in client DaemonSet when encryption disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: encrypt CLI option present in client DaemonSet when all config is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.gossipEncryption.secretKey=foo' \ + --set 'global.gossipEncryption.secretName=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr) + [ "${actual}" == "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "client/DaemonSet: CA volume present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "client/DaemonSet: client certificate volume present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "tls-client-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "client/DaemonSet: port 8501 is not exposed when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "client/DaemonSet: port 8501 is exposed when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "client/DaemonSet: port 8500 is still exposed when httpsOnly is not enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8500)' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "client/DaemonSet: port 8500 is not exposed when httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8500)' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "client/DaemonSet: readiness checks are over HTTP when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("http://127.0.0.1:8500")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: readiness checks are over HTTPS when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("https://127.0.0.1:8501")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: readiness checks use CA certificate when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("--cacert /consul/tls/ca/tls.crt")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: HTTP port is disabled when global.tls.httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("ports { http = -1 }")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: init container is created when global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "client-tls-init") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: both ACL and TLS init containers are created when global.tls.enabled=true and global.bootstrapACLs=true" { + cd `chart_dir` + local has_acl_init_container=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | length > 0' | tee /dev/stderr) + + [ "${has_acl_init_container}" = "true" ] + + local has_tls_init_container=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "client-tls-init") | length > 0' | tee /dev/stderr) + + [ "${has_tls_init_container}" = "true" ] +} + +@test "client/DaemonSet: sets Consul environment variables when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://localhost:8501" ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "client/DaemonSet: sets verify_* flags to true by default when global.tls.enabled" { + cd `chart_dir` + local command=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. 
| contains("verify_incoming_rpc = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | contains("verify_outgoing = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | contains("verify_server_hostname = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: doesn't set the verify_* flags by default when global.tls.enabled and global.tls.verify is false" { + cd `chart_dir` + local command=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.verify=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | contains("verify_incoming_rpc = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] + + actual=$(echo $command | jq -r '. | contains("verify_outgoing = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] + + actual=$(echo $command | jq -r '. | contains("verify_server_hostname = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/DaemonSet: can overwrite CA secret with the provided one" { + cd `chart_dir` + local spec=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-cert") | .secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the provided ca key secret is attached as volume + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-key") | .secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-key" ] + + # check that the volumes pulls the provided secret keys as a CA cert + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-cert") | .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] + + # check that the volumes pulls the provided secret keys as a CA key + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-key") | .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# extraEnvironmentVariables + +@test "client/DaemonSet: custom environment variables" { + cd `chart_dir` + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.extraEnvironmentVars.custom_proxy=fakeproxy' \ + --set 'client.extraEnvironmentVars.no_proxy=custom_no_proxy' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.[3].name' | tee /dev/stderr) + [ "${actual}" = "custom_proxy" ] + + local actual=$(echo $object | + yq -r '.[3].value' | tee /dev/stderr) + [ "${actual}" = "fakeproxy" ] + + local actual=$(echo $object | + yq -r '.[4].name' | tee /dev/stderr) + [ "${actual}" = "no_proxy" ] + + local actual=$(echo $object | + yq -r '.[4].value' | tee /dev/stderr) + [ "${actual}" = "custom_no_proxy" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "client/DaemonSet: aclconfig volume is created when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[2].name == "aclconfig"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: aclconfig volumeMount is created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[2]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "aclconfig" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/aclconfig" ] +} + +@test "client/DaemonSet: command includes aclconfig dir when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("/consul/aclconfig"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: init container is created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# client.exposeGossipPorts + +@test "client/DaemonSet: client uses podIP when client.exposeGossipPorts=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.enabled=true' \ + --set 'client.exposeGossipPorts=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers | map(select(.name=="consul")) | .[0].env | map(select(.name=="ADVERTISE_IP")) | .[0] | .valueFrom.fieldRef.fieldPath' | + tee /dev/stderr) + [ "${actual}" = "status.podIP" ] +} + +@test "client/DaemonSet: client uses hostIP when client.exposeGossipPorts=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.enabled=true' \ + --set 'client.exposeGossipPorts=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers | map(select(.name=="consul")) | .[0].env | map(select(.name=="ADVERTISE_IP")) | .[0] | .valueFrom.fieldRef.fieldPath' | + tee /dev/stderr) + [ "${actual}" = "status.hostIP" ] +} + +@test "client/DaemonSet: client doesn't expose hostPorts when client.exposeGossipPorts=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers | map(select(.name=="consul")) | .[0].ports | map(select(.containerPort==8301)) | .[0].hostPort' | + tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/DaemonSet: client exposes hostPorts when client.exposeGossipPorts=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.enabled=true' \ + --set 'client.exposeGossipPorts=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers | map(select(.name=="consul")) | .[0].ports | map(select(.containerPort==8301)) | .[0].hostPort' | + tee /dev/stderr) + [ "${actual}" = "8301" ] +} + +#-------------------------------------------------------------------- +# dataDirectoryHostPath + +@test "client/DaemonSet: data directory is emptyDir by default" { + cd `chart_dir` + # Test that hostPath is set to null. + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[0].hostPath == null' | tee /dev/stderr ) + [ "${actual}" = "true" ] + + # Test that emptyDir is set instead. + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[0].emptyDir == {}' | tee /dev/stderr ) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: hostPath data directory can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.dataDirectoryHostPath=/opt/consul' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[0].hostPath.path == "/opt/consul"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# dnsPolicy + +@test "client/DaemonSet: dnsPolicy not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.dnsPolicy == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: dnsPolicy can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set 'client.dnsPolicy=ClusterFirstWithHostNet' \ + . | tee /dev/stderr | + yq '.spec.template.spec.dnsPolicy == "ClusterFirstWithHostNet"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# updateStrategy + +@test "client/DaemonSet: updateStrategy not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + . | tee /dev/stderr | \ + yq '.spec.updateStrategy == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/DaemonSet: updateStrategy can be set" { + cd `chart_dir` + local updateStrategy="type: RollingUpdate +rollingUpdate: + maxUnavailable: 5 +" + local actual=$(helm template \ + -x templates/client-daemonset.yaml \ + --set "client.updateStrategy=${updateStrategy}" \ + . | tee /dev/stderr | \ + yq -c '.spec.updateStrategy == {"type":"RollingUpdate","rollingUpdate":{"maxUnavailable":5}}' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/client-podsecuritypolicy.bats b/consul-helm/test/unit/client-podsecuritypolicy.bats new file mode 100644 index 0000000..2622a45 --- /dev/null +++ b/consul-helm/test/unit/client-podsecuritypolicy.bats @@ -0,0 +1,131 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/PodSecurityPolicy: disabled with client disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'client.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/PodSecurityPolicy: enabled with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/PodSecurityPolicy: only http and grpc ports are allowed as hostPorts by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -c '.spec.hostPorts' | tee /dev/stderr) + [ "${actual}" = '[{"min":8500,"max":8500},{"min":8502,"max":8502}]' ] +} + +#-------------------------------------------------------------------- +# client.grpc + +@test "client/PodSecurityPolicy: hostPort 8502 is not allowed when client.grpc=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'client.grpc=false' \ + . 
| tee /dev/stderr | + yq -c '.spec.hostPorts' | tee /dev/stderr) + [ "${actual}" = '[{"min":8500,"max":8500}]' ] +} + +#-------------------------------------------------------------------- +# client.exposeGossipPorts + +@test "client/PodSecurityPolicy: hostPort 8301 allowed when exposeGossipPorts=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'client.exposeGossipPorts=true' \ + . | tee /dev/stderr | + yq -c '.spec.hostPorts' | tee /dev/stderr) + [ "${actual}" = '[{"min":8500,"max":8500},{"min":8502,"max":8502},{"min":8301,"max":8301}]' ] +} + +#-------------------------------------------------------------------- +# client.dataDirectoryHostPath + +@test "client/PodSecurityPolicy: disallows hostPath volume by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq '.spec.volumes | any(contains("hostPath"))' | tee /dev/stderr) + [ "${actual}" = 'false' ] +} + +@test "client/PodSecurityPolicy: allows hostPath volume when dataDirectoryHostPath is set" { + cd `chart_dir` + # Test that hostPath is an allowed volume type. + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'client.dataDirectoryHostPath=/opt/consul' \ + . | tee /dev/stderr | + yq '.spec.volumes | any(contains("hostPath"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + # Test that the path we're allowed to write to is the right one. + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'client.dataDirectoryHostPath=/opt/consul' \ + . 
| tee /dev/stderr | + yq -r '.spec.allowedHostPaths[0].pathPrefix' | tee /dev/stderr) + [ "${actual}" = '/opt/consul' ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "client/PodSecurityPolicy: hostPort 8501 is allowed when global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -c '.spec.hostPorts' | tee /dev/stderr) + [ "${actual}" = '[{"min":8501,"max":8501},{"min":8502,"max":8502}]' ] +} + +@test "client/PodSecurityPolicy: hostPort 8500 is not allowed when global.tls.enabled=true and global.tls.httpsOnly=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . | tee /dev/stderr | + yq -c '.spec.hostPorts' | tee /dev/stderr) + [ "${actual}" = '[{"min":8501,"max":8501},{"min":8502,"max":8502}]' ] +} diff --git a/consul-helm/test/unit/client-serviceaccount.bats b/consul-helm/test/unit/client-serviceaccount.bats new file mode 100644 index 0000000..f787e37 --- /dev/null +++ b/consul-helm/test/unit/client-serviceaccount.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/ServiceAccount: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ServiceAccount: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-serviceaccount.yaml \ + --set 'global.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ServiceAccount: disabled with client disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-serviceaccount.yaml \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/ServiceAccount: enabled with client enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-serviceaccount.yaml \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/ServiceAccount: enabled with client enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-serviceaccount.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/consul-helm/test/unit/client-snapshot-agent-clusterrole.bats b/consul-helm/test/unit/client-snapshot-agent-clusterrole.bats new file mode 100644 index 0000000..25fe077 --- /dev/null +++ b/consul-helm/test/unit/client-snapshot-agent-clusterrole.bats @@ -0,0 +1,87 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentClusterRole: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentClusterRole: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentClusterRole: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "client/SnapshotAgentClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "client/SnapshotAgentClusterRole: allows secret access with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} + +@test "client/SnapshotAgentClusterRole: allows secret access with global.bootstrapACLs=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrole.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[1].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} diff --git a/consul-helm/test/unit/client-snapshot-agent-clusterrolebinding.bats b/consul-helm/test/unit/client-snapshot-agent-clusterrolebinding.bats new file mode 100644 index 0000000..887d695 --- /dev/null +++ b/consul-helm/test/unit/client-snapshot-agent-clusterrolebinding.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentClusterRoleBinding: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentClusterRoleBinding: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrolebinding.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentClusterRoleBinding: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-clusterrolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} \ No newline at end of file diff --git a/consul-helm/test/unit/client-snapshot-agent-deployment.bats b/consul-helm/test/unit/client-snapshot-agent-deployment.bats new file mode 100644 index 0000000..781664c --- /dev/null +++ b/consul-helm/test/unit/client-snapshot-agent-deployment.bats @@ -0,0 +1,271 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentDeployment: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "client/SnapshotAgentDeployment: no tolerations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates tolerations when client.tolerations is populated" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.tolerations=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "client/SnapshotAgentDeployment: no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.priorityClassName | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates priorityClassName when client.priorityClassName is populated" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.priorityClassName=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.priorityClassName | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs and snapshotAgent.configSecret + +@test "client/SnapshotAgentDeployment: no initContainer by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/SnapshotAgentDeployment: populates initContainer when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: no volumes by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/SnapshotAgentDeployment: populates volumes when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: populates volumes when client.snapshotAgent.configSecret.secretName and client.snapshotAgent.configSecret secretKey are defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=secret' \ + --set 'client.snapshotAgent.configSecret.secretKey=key' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: no container volumeMounts by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "client/SnapshotAgentDeployment: populates container volumeMounts when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: populates container volumeMounts when client.snapshotAgent.configSecret.secretName and client.snapshotAgent.configSecret secretKey are defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=secret' \ + --set 'client.snapshotAgent.configSecret.secretKey=key' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "client/SnapshotAgentDeployment: no nodeSelector by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates nodeSelector when client.nodeSelector is populated" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.nodeSelector=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "client/SnapshotAgentDeployment: sets TLS env vars when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "client/SnapshotAgentDeployment: populates volumes when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: populates container volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: can overwrite CA with the provided secret" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that it uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} diff --git a/consul-helm/test/unit/client-snapshot-agent-podsecuritypolicy.bats b/consul-helm/test/unit/client-snapshot-agent-podsecuritypolicy.bats new file mode 100644 index 0000000..6e34b00 --- /dev/null +++ b/consul-helm/test/unit/client-snapshot-agent-podsecuritypolicy.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentPodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentPodSecurityPolicy: disabled with snapshot agent disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentPodSecurityPolicy: enabled with snapshot agent enabled global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/client-snapshot-agent-serviceaccount.bats b/consul-helm/test/unit/client-snapshot-agent-serviceaccount.bats new file mode 100644 index 0000000..5e85e32 --- /dev/null +++ b/consul-helm/test/unit/client-snapshot-agent-serviceaccount.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} diff --git a/consul-helm/test/unit/connect-inject-authmethod-clusterrole.bats b/consul-helm/test/unit/connect-inject-authmethod-clusterrole.bats new file mode 100644 index 0000000..e7427e3 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-authmethod-clusterrole.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInjectAuthMethod/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ClusterRole: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrole.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInjectAuthMethod/ClusterRole: disabled with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ClusterRole: enabled with global.bootstrapACLs.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-authmethod-clusterrolebinding.bats b/consul-helm/test/unit/connect-inject-authmethod-clusterrolebinding.bats new file mode 100644 index 0000000..7375da3 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-authmethod-clusterrolebinding.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInjectAuthMethod/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ClusterRoleBinding: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInjectAuthMethod/ClusterRoleBinding: disabled with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrolebinding.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ClusterRoleBinding: enabled with global.bootstrapACLs.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-clusterrolebinding.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-authmethod-serviceaccount.bats b/consul-helm/test/unit/connect-inject-authmethod-serviceaccount.bats new file mode 100644 index 0000000..e2b8d63 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-authmethod-serviceaccount.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInjectAuthMethod/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ServiceAccount: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-serviceaccount.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInjectAuthMethod/ServiceAccount: disabled with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-serviceaccount.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInjectAuthMethod/ServiceAccount: enabled with global.bootstrapACLs.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-authmethod-serviceaccount.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-clusterrole.bats b/consul-helm/test/unit/connect-inject-clusterrole.bats new file mode 100644 index 0000000..df66a8d --- /dev/null +++ b/consul-helm/test/unit/connect-inject-clusterrole.bats @@ -0,0 +1,119 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRole: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/ClusterRole: disabled with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRole: disabled with connectInject.certs.secretName set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.certs.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRole: enabled with connectInject.certs.secretName not set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "connectInject/ClusterRole: no podsecuritypolicies access with global.enablePodSecurityPolicies=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enablePodSecurityPolicies=false' \ + . | tee /dev/stderr | + yq -r '.rules | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "connectInject/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[1].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs for namespaces + +@test "connectInject/ClusterRole: does not allow secret access with global.bootsrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -r '.rules | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "connectInject/ClusterRole: allow secret access with global.bootsrapACLs=true and global.enableConsulNamespaces=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[1].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} + +@test "connectInject/ClusterRole: allows secret access with bootsrapACLs, enablePodSecurityPolicies and enableConsulNamespaces all true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq -r '.rules[2].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} diff --git a/consul-helm/test/unit/connect-inject-clusterrolebinding.bats b/consul-helm/test/unit/connect-inject-clusterrolebinding.bats new file mode 100644 index 0000000..6cb38cd --- /dev/null +++ b/consul-helm/test/unit/connect-inject-clusterrolebinding.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRoleBinding: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/ClusterRoleBinding: disabled with connectInject.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrolebinding.yaml \ + --set 'connectInject.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRoleBinding: disabled with connectInject.certs.secretName set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrolebinding.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.certs.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ClusterRoleBinding: enabled with connectInject.certs.secretName not set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-clusterrolebinding.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-deployment.bats b/consul-helm/test/unit/connect-inject-deployment.bats new file mode 100755 index 0000000..0a34871 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-deployment.bats @@ -0,0 +1,783 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/Deployment: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: enable with global.enabled false, client.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: disable with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: fails if global.enabled=false" { + cd `chart_dir` + run helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=true and client.enabled=false" { + cd `chart_dir` + run helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=false and client.enabled=false" { + cd `chart_dir` + run helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true for connect injection" ]] +} + +#-------------------------------------------------------------------- +# consul and envoy images + +@test "connectInject/Deployment: container image is global default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.imageK8S=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "\"foo\"" ] +} + +@test "connectInject/Deployment: container image overrides" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.imageK8S=foo' \ + --set 'connectInject.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "\"bar\"" ] +} + +@test "connectInject/Deployment: consul-image defaults to global" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.image=foo' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-image=\"foo\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul-image can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.image=foo' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.imageConsul=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-image=\"bar\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: envoy-image is not set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-envoy-image"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: envoy-image can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.imageEnvoy=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-envoy-image=\"foo\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# cert secrets + +@test "connectInject/Deployment: no secretName: no tls-{cert,key}-file set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-cert-file"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-key-file"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-auto"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: with secretName: tls-{cert,key}-file set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.certs.secretName=foo' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-cert-file"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.certs.secretName=foo' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-key-file"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.certs.secretName=foo' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-auto"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + + +#-------------------------------------------------------------------- +# service account name + +@test "connectInject/Deployment: with secretName: no serviceAccountName set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.certs.secretName=foo' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.serviceAccountName | has("serviceAccountName")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: no secretName: serviceAccountName set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.serviceAccountName | contains("connect-injector-webhook-svc-account")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "connectInject/Deployment: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "connectInject/Deployment: nodeSelector is not set by default with sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "connectInject/Deployment: specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# centralConfig + +@test "connectInject/Deployment: centralConfig is enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-central-config"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: centralConfig can be disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-central-config"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: defaultProtocol is disabled by default with centralConfig enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-default-protocol"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: defaultProtocol can be enabled with centralConfig enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=true' \ + --set 'connectInject.centralConfig.defaultProtocol=grpc' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-default-protocol=\"grpc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# authMethod + +@test "connectInject/Deployment: -acl-auth-method is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-acl-auth-method="))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -acl-auth-method is set when global.bootstrapACLs is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-acl-auth-method=\"release-name-consul-k8s-auth-method\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -acl-auth-method is set to connectInject.overrideAuthMethodName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.overrideAuthMethodName=override' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-acl-auth-method=\"override\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -acl-auth-method is overridden by connectInject.overrideAuthMethodName if global.bootstrapACLs is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'connectInject.overrideAuthMethodName=override' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-acl-auth-method=\"override\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "connectInject/Deployment: Adds tls-ca-cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "connectInject/Deployment: Adds both tls-ca-cert and certs volumes when global.tls.enabled is true and connectInject.certs.secretName is set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'connectInject.certs.secretName=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "connectInject/Deployment: Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "connectInject/Deployment: Adds both tls-ca-cert and certs volumeMounts when global.tls.enabled is true and connectInject.certs.secretName is set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'connectInject.certs.secretName=foo' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "connectInject/Deployment: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# k8sAllowNamespaces & k8sDenyNamespaces + +@test "connectInject/Deployment: default is allow '*', deny nothing" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'map(select(test("allow-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'any(contains("allow-k8s-namespace=\"*\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'map(select(test("deny-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "connectInject/Deployment: can set allow and deny" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.k8sAllowNamespaces[0]=allowNamespace' \ + --set 'connectInject.k8sDenyNamespaces[0]=denyNamespace' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'map(select(test("allow-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'map(select(test("deny-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'any(contains("allow-k8s-namespace=\"allowNamespace\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("deny-k8s-namespace=\"denyNamespace\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + +@test "connectInject/Deployment: namespace options disabled by default" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: namespace options set with .global.enableConsulNamespaces=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: mirroring options set with .connectInject.consulNamespaces.mirroringK8S=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: prefix can be set with .connectInject.consulNamespaces.mirroringK8SPrefix" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix=k8s-"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + acl token + +@test "connectInject/Deployment: aclInjectToken disabled when namespaces not enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.aclInjectToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: aclInjectToken disabled when secretName is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.aclInjectToken.secretKey=bar' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: aclInjectToken disabled when secretKey is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.aclInjectToken.secretName=foo' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: aclInjectToken enabled when secretName and secretKey is provided" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.aclInjectToken.secretName=foo' \ + --set 'connectInject.aclInjectToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name]' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +#-------------------------------------------------------------------- +# namespaces + global.bootstrapACLs + +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN env variable created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] ' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "connectInject/Deployment: init container is created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "injector-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: cross namespace policy is not added when global.bootstrapACLs=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: cross namespace policy is added when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + http address + +@test "connectInject/Deployment: CONSUL_HTTP_ADDR env variable not set when namespaces are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: CONSUL_HTTP_ADDR env variable set when namespaces are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: CONSUL_HTTP_ADDR and CONSUL_CACERT env variables set when namespaces are enabled" { + cd `chart_dir` + local object=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] ' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + host ip + +@test "connectInject/Deployment: HOST_IP env variable not set when namespaces are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("HOST_IP"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: HOST_IP env variable set when namespaces are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("HOST_IP"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-mutatingwebhook.bats b/consul-helm/test/unit/connect-inject-mutatingwebhook.bats new file mode 100755 index 0000000..994fe38 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-mutatingwebhook.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/MutatingWebhookConfiguration: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-mutatingwebhook.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/MutatingWebhookConfiguration: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-mutatingwebhook.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/MutatingWebhookConfiguration: disable with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-mutatingwebhook.yaml \ + --set 'connectInject.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/MutatingWebhookConfiguration: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-mutatingwebhook.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/MutatingWebhookConfiguration: namespace is set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-mutatingwebhook.yaml \ + --set 'connectInject.enabled=true' \ + --namespace foo \ + . | tee /dev/stderr | + yq '.webhooks[0].clientConfig.service.namespace' | tee /dev/stderr) + [ "${actual}" = "\"foo\"" ] +} diff --git a/consul-helm/test/unit/connect-inject-podsecuritypolicy.bats b/consul-helm/test/unit/connect-inject-podsecuritypolicy.bats new file mode 100644 index 0000000..ee14dc3 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-podsecuritypolicy.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-podsecuritypolicy.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/PodSecurityPolicy: disabled by default with connectInject enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-podsecuritypolicy.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/PodSecurityPolicy: disabled with connectInject disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-podsecuritypolicy.yaml \ + --set 'connectInject.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/PodSecurityPolicy: enabled with connectInject enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-podsecuritypolicy.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/connect-inject-service.bats b/consul-helm/test/unit/connect-inject-service.bats new file mode 100755 index 0000000..6bb6e65 --- /dev/null +++ b/consul-helm/test/unit/connect-inject-service.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/Service: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-service.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-service.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Service: disable with connectInject.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-service.yaml \ + --set 'connectInject.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Service: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-service.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} diff --git a/consul-helm/test/unit/connect-inject-serviceaccount.bats b/consul-helm/test/unit/connect-inject-serviceaccount.bats new file mode 100644 index 0000000..8c3944a --- /dev/null +++ b/consul-helm/test/unit/connect-inject-serviceaccount.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "connectInject/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ServiceAccount: enabled with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-serviceaccount.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/ServiceAccount: disabled with connectInject.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-serviceaccount.yaml \ + --set 'connectInject.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ServiceAccount: disabled with connectInject.certs.secretName set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-serviceaccount.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.certs.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/ServiceAccount: enabled with connectInject.certs.secretName not set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/connect-inject-serviceaccount.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/dns-service.bats b/consul-helm/test/unit/dns-service.bats new file mode 100755 index 0000000..787ba12 --- /dev/null +++ b/consul-helm/test/unit/dns-service.bats @@ -0,0 +1,89 @@ +#!/usr/bin/env bats + +load _helpers + +@test "dns/Service: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "dns/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'global.enabled=false' \ + --set 'dns.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "dns/Service: disable with dns.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'dns.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "dns/Service: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "dns/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'dns.enabled=true' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "dns/Service: can set annotations" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'dns.enabled=true' \ + --set 'dns.annotations=key: value' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# clusterIP + +@test "dns/Service: clusterIP not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + . | tee /dev/stderr | + yq '.spec | .clusterIP? == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "dns/Service: specified clusterIP" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/dns-service.yaml \ + --set 'dns.clusterIP=192.168.1.1' \ + . 
| tee /dev/stderr | + yq '.spec | .clusterIP == "192.168.1.1"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/enterprise-license-clusterrole.bats b/consul-helm/test/unit/enterprise-license-clusterrole.bats new file mode 100644 index 0000000..4f02d3e --- /dev/null +++ b/consul-helm/test/unit/enterprise-license-clusterrole.bats @@ -0,0 +1,97 @@ +#!/usr/bin/env bats + +load _helpers + +@test "enterpriseLicense/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRole: disabled with server=false, ent secret defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enabled=false' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRole: disabled when ent secretName missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRole: disabled when ent secretKey missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRole: enabled when ent license defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "enterpriseLicense/ClusterRole: rules are empty if global.bootstrapACLs and global.enablePodSecurityPolicies are false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq '.rules | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "enterpriseLicense/ClusterRole: allows acl token when global.bootstrapACLs is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.rules | map(select(.resourceNames[0] == "release-name-consul-enterprise-license-acl-token")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "enterpriseLicense/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrole.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} diff --git a/consul-helm/test/unit/enterprise-license-clusterrolebinding.bats b/consul-helm/test/unit/enterprise-license-clusterrolebinding.bats new file mode 100644 index 0000000..eee9bbf --- /dev/null +++ b/consul-helm/test/unit/enterprise-license-clusterrolebinding.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "enterpriseLicense/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRoleBinding: disabled with server=false, ent secret defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrolebinding.yaml \ + --set 'server.enabled=false' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRoleBinding: disabled when ent secretName missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrolebinding.yaml \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRoleBinding: disabled when ent secretKey missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrolebinding.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ClusterRoleBinding: enabled when ent license defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-clusterrolebinding.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/enterprise-license-job.bats b/consul-helm/test/unit/enterprise-license-job.bats new file mode 100644 index 0000000..3a8ce55 --- /dev/null +++ b/consul-helm/test/unit/enterprise-license-job.bats @@ -0,0 +1,201 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/EnterpriseLicense: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/EnterpriseLicense: disabled when servers are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enabled=false' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/EnterpriseLicense: disabled when secretName is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/EnterpriseLicense: disabled when secretKey is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/EnterpriseLicense: enabled when secretName and secretKey is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "server/EnterpriseLicense: CONSUL_HTTP_TOKEN env variable created when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/EnterpriseLicense: init container is created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "ent-license-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "server/EnterpriseLicense: no volumes when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "server/EnterpriseLicense: volumes present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "server/EnterpriseLicense: no volumes mounted when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "server/EnterpriseLicense: volumes mounted when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "server/EnterpriseLicense: URL is http when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://release-name-consul-server:8500" ] +} + +@test "server/EnterpriseLicense: URL is https when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://release-name-consul-server:8501" ] +} + +@test "server/EnterpriseLicense: CA certificate is specified when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "server/EnterpriseLicense: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + -x templates/enterprise-license-job.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} diff --git a/consul-helm/test/unit/enterprise-license-podsecuritypolicy.bats b/consul-helm/test/unit/enterprise-license-podsecuritypolicy.bats new file mode 100644 index 0000000..25908e4 --- /dev/null +++ b/consul-helm/test/unit/enterprise-license-podsecuritypolicy.bats @@ -0,0 +1,68 @@ +#!/usr/bin/env bats + +load _helpers + +@test "enterpriseLicense/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/PodSecurityPolicy: disabled with server=false, ent secret defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + --set 'server.enabled=false' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/PodSecurityPolicy: disabled when ent secretName missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/PodSecurityPolicy: disabled when ent secretKey missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/PodSecurityPolicy: disabled when enablePodSecurityPolicies=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.enablePodSecurityPolicies=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/PodSecurityPolicy: enabled when ent license defined and enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-podsecuritypolicy.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/enterprise-license-serviceaccount.bats b/consul-helm/test/unit/enterprise-license-serviceaccount.bats new file mode 100644 index 0000000..de1970b --- /dev/null +++ b/consul-helm/test/unit/enterprise-license-serviceaccount.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "enterpriseLicense/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-serviceaccount.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ServiceAccount: disabled with server=false, ent secret defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-serviceaccount.yaml \ + --set 'server.enabled=false' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ServiceAccount: disabled when ent secretName missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-serviceaccount.yaml \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ServiceAccount: disabled when ent secretKey missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-serviceaccount.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "enterpriseLicense/ServiceAccount: enabled when ent license defined" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/enterprise-license-serviceaccount.yaml \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/helpers.bats b/consul-helm/test/unit/helpers.bats new file mode 100644 index 0000000..614e3fc --- /dev/null +++ b/consul-helm/test/unit/helpers.bats @@ -0,0 +1,102 @@ +#!/usr/bin/env bats +# This file tests the helpers in _helpers.tpl. 
+ +load _helpers + +#-------------------------------------------------------------------- +# consul.fullname +# These tests use test-runner.yaml to test the consul.fullname helper +# since we need an existing template that calls the consul.fullname helper. + +@test "helper/consul.fullname: defaults to release-name-consul" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-test" ] +} + +@test "helper/consul.fullname: fullnameOverride overrides the name" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set fullnameOverride=override \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "override-test" ] +} + +@test "helper/consul.fullname: fullnameOverride is truncated to 63 chars" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set fullnameOverride=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk-test" ] +} + +@test "helper/consul.fullname: fullnameOverride has trailing '-' trimmed" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set fullnameOverride=override- \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "override-test" ] +} + +@test "helper/consul.fullname: global.name overrides the name" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set global.name=override \ + . 
| tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "override-test" ] +} + +@test "helper/consul.fullname: global.name is truncated to 63 chars" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set global.name=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk-test" ] +} + +@test "helper/consul.fullname: global.name has trailing '-' trimmed" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set global.name=override- \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "override-test" ] +} + +@test "helper/consul.fullname: nameOverride is supported" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set nameOverride=override \ + . | tee /dev/stderr | + yq -r '.metadata.name' | tee /dev/stderr) + [ "${actual}" = "release-name-override-test" ] +} + +# This test ensures that we use {{ template "consul.fullname" }} everywhere instead of +# {{ .Release.Name }} because that's required in order to support the name +# override settings fullnameOverride and global.name. In some cases, we need to +# use .Release.Name. In those cases, add your exception to this list. +# +# If this test fails, you're likely using {{ .Release.Name }} where you should +# be using {{ template "consul.fullname" }} +@test "helper/consul.fullname: used everywhere" { + cd `chart_dir` + # Grep for uses of .Release.Name that aren't using it as a label. + local actual=$(grep -r '{{ .Release.Name }}' templates/*.yaml | grep -v 'release: ' | tee /dev/stderr ) + [ "${actual}" = 'templates/server-acl-init-job.yaml: -server-label-selector=component=server,app={{ template "consul.name" . 
}},release={{ .Release.Name }} \' ] +} diff --git a/consul-helm/test/unit/mesh-gateway-clusterrole.bats b/consul-helm/test/unit/mesh-gateway-clusterrole.bats new file mode 100644 index 0000000..1f3db82 --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-clusterrole.bats @@ -0,0 +1,76 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/ClusterRole: enabled with meshGateway, connectInject and client.grpc enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/ClusterRole: rules for PodSecurityPolicy" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} + +@test "meshGateway/ClusterRole: rules for global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} + +@test "meshGateway/ClusterRole: rules is empty if no ACLs or PSPs" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.rules' | tee /dev/stderr) + [ "${actual}" = "[]" ] +} + +@test "meshGateway/ClusterRole: rules for both ACLs and PSPs" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrole.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules | length' | tee /dev/stderr) + [ "${actual}" = "2" ] +} diff --git a/consul-helm/test/unit/mesh-gateway-clusterrolebinding.bats b/consul-helm/test/unit/mesh-gateway-clusterrolebinding.bats new file mode 100644 index 0000000..92400bb --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-clusterrolebinding.bats @@ -0,0 +1,38 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/ClusterRoleBinding: enabled with meshGateway, connectInject and client.grpc enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrolebinding.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/ClusterRoleBinding: subject name is correct" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-clusterrolebinding.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --name 'release-name' \ + . | tee /dev/stderr | + yq -r '.subjects[0].name' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-mesh-gateway" ] +} + diff --git a/consul-helm/test/unit/mesh-gateway-deployment.bats b/consul-helm/test/unit/mesh-gateway-deployment.bats new file mode 100755 index 0000000..8523760 --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-deployment.bats @@ -0,0 +1,656 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/Deployment: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/Deployment: enabled with meshGateway, connectInject and client.grpc enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# prerequisites + +@test "meshGateway/Deployment: fails if connectInject.enabled=false" { + cd `chart_dir` + run helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=false' \ + --set 'client.grpc=true' . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.enabled must be true" ]] +} + +@test "meshGateway/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is false and clients are not explicitly enabled" { + cd `chart_dir` + run helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'client.grpc=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'client.grpc=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +#-------------------------------------------------------------------- +# annotations + +@test "meshGateway/Deployment: no extra annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "meshGateway/Deployment: extra annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.annotations=key1: value1 +key2: value2' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) + [ "${actual}" = "3" ] +} + +#-------------------------------------------------------------------- +# replicas + +@test "meshGateway/Deployment: replicas defaults to 2" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.replicas' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "meshGateway/Deployment: replicas can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.replicas=3' \ + . | tee /dev/stderr | + yq -r '.spec.replicas' | tee /dev/stderr) + [ "${actual}" = "3" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "meshGateway/Deployment: affinity defaults to one per node" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr) + [ "${actual}" = "kubernetes.io/hostname" ] +} + +@test "meshGateway/Deployment: affinity can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.affinity=key: value' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.affinity.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "meshGateway/Deployment: no tolerations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.tolerations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: tolerations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.tolerations=- key: value' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.tolerations[0].key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# hostNetwork + + +@test "meshGateway/Deployment: hostNetwork is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.hostNetwork' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: hostNetwork can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.hostNetwork=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.hostNetwork' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# dnsPolicy + +@test "meshGateway/Deployment: no dnsPolicy by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.dnsPolicy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: dnsPolicy can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.dnsPolicy=ClusterFirst' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.dnsPolicy' | tee /dev/stderr) + [ "${actual}" = "ClusterFirst" ] +} + +#-------------------------------------------------------------------- +# BootstrapACLs + +@test "meshGateway/Deployment: global.BootstrapACLs enabled creates init container and secret" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr ) + local init_container=$(echo "${actual}" | yq -r '.spec.template.spec.initContainers[1].name' | tee /dev/stderr) + [ "${init_container}" = "mesh-gateway-acl-init" ] + + local secret=$(echo "${actual}" | yq -r '.spec.template.spec.containers[0].env[2].name' | tee /dev/stderr) + [ "${secret}" = "CONSUL_HTTP_TOKEN" ] +} + +#-------------------------------------------------------------------- +# envoyImage + +@test "meshGateway/Deployment: envoy image has default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "envoyproxy/envoy:v1.13.0" ] +} + +@test "meshGateway/Deployment: envoy image can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.imageEnvoy=new/image' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "new/image" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "meshGateway/Deployment: resources has default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "128Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "250m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "256Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "500m" ] +} + +@test "meshGateway/Deployment: resources can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.resources=requests: yadayada' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.requests' | tee /dev/stderr) + [ "${actual}" = "yadayada" ] +} + +#-------------------------------------------------------------------- +# containerPort + +@test "meshGateway/Deployment: containerPort defaults to 443" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr) + + [[ $(echo "$actual" | yq -r '.command[2]') =~ '-address="${POD_IP}:443"' ]] + [ $(echo "$actual" | yq -r '.ports[0].containerPort') = "443" ] + [ $(echo "$actual" | yq -r '.livenessProbe.tcpSocket.port') = "443" ] + [ $(echo "$actual" | yq -r '.readinessProbe.tcpSocket.port') = "443" ] +} + +@test "meshGateway/Deployment: containerPort can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.containerPort=8443' \ + . 
| tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr) + + [[ $(echo "$actual" | yq -r '.command[2]') =~ '-address="${POD_IP}:8443"' ]] + [ $(echo "$actual" | yq -r '.ports[0].containerPort') = "8443" ] + [ $(echo "$actual" | yq -r '.livenessProbe.tcpSocket.port') = "8443" ] + [ $(echo "$actual" | yq -r '.readinessProbe.tcpSocket.port') = "8443" ] +} + +#-------------------------------------------------------------------- +# wanAddress + +@test "meshGateway/Deployment: wanAddress.port defaults to 443" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.wanAddress.useNodeIP=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + [[ "${actual}" =~ '-wan-address="${HOST_IP}:443"' ]] +} + +@test "meshGateway/Deployment: wanAddress uses NodeIP by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + [[ "${actual}" =~ '-wan-address="${HOST_IP}:443"' ]] +} + +@test "meshGateway/Deployment: wanAddress.useNodeIP" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.wanAddress.useNodeIP=true' \ + --set 'meshGateway.wanAddress.port=4444' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + [[ "${actual}" =~ '-wan-address="${HOST_IP}:4444"' ]] +} + +@test "meshGateway/Deployment: wanAddress.useNodeName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.wanAddress.useNodeIP=false' \ + --set 'meshGateway.wanAddress.useNodeName=true' \ + --set 'meshGateway.wanAddress.port=4444' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + [[ "${actual}" =~ '-wan-address="${NODE_NAME}:4444"' ]] +} + +@test "meshGateway/Deployment: wanAddress.host" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.wanAddress.useNodeIP=false' \ + --set 'meshGateway.wanAddress.useNodeName=false' \ + --set 'meshGateway.wanAddress.host=myhost' \ + --set 'meshGateway.wanAddress.port=4444' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + [[ "${actual}" =~ '-wan-address="myhost:4444"' ]] +} + +#-------------------------------------------------------------------- +# consulServiceName + +@test "meshGateway/Deployment: fails if consulServiceName is set and bootstrapACLs is true" { + cd `chart_dir` + run helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.consulServiceName=override' \ + --set 'global.bootstrapACLs=true' \ + . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "if global.bootstrapACLs is true, meshGateway.consulServiceName cannot be set" ]] +} + +@test "meshGateway/Deployment: does not fail if consulServiceName is set to mesh-gateway and bootstrapACLs is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.consulServiceName=mesh-gateway' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + + [[ $(echo "${actual}" | yq -r '.command[2]' ) =~ '-service="mesh-gateway"' ]] + [[ $(echo "${actual}" | yq -r '.lifecycle.preStop.exec.command' ) =~ '-id=\"mesh-gateway\"' ]] +} + +@test "meshGateway/Deployment: consulServiceName can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.consulServiceName=overridden' \ + . | tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + + [[ $(echo "${actual}" | yq -r '.command[2]' ) =~ '-service="overridden"' ]] + [[ $(echo "${actual}" | yq -r '.lifecycle.preStop.exec.command' ) =~ '-id=\"overridden\"' ]] +} + +#-------------------------------------------------------------------- +# healthchecks + +@test "meshGateway/Deployment: healthchecks are on by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + + local liveness=$(echo "${actual}" | yq -r '.livenessProbe | length > 0' | tee /dev/stderr) + [ "${liveness}" = "true" ] + local readiness=$(echo "${actual}" | yq -r '.readinessProbe | length > 0' | tee /dev/stderr) + [ "${readiness}" = "true" ] +} + +@test "meshGateway/Deployment: can disable healthchecks" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.enableHealthChecks=false' \ + . | tee /dev/stderr \ + | yq '.spec.template.spec.containers[0]' | tee /dev/stderr ) + + local liveness=$(echo "${actual}" | yq -r '.livenessProbe | length > 0' | tee /dev/stderr) + [ "${liveness}" = "false" ] + local readiness=$(echo "${actual}" | yq -r '.readinessProbe | length > 0' | tee /dev/stderr) + [ "${readiness}" = "false" ] +} + +#-------------------------------------------------------------------- +# hostPort + +@test "meshGateway/Deployment: no hostPort by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].ports[0].hostPort' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: can set a hostPort" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.hostPort=443' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].ports[0].hostPort' | tee /dev/stderr) + + [ "${actual}" = "443" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "meshGateway/Deployment: no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: can set a priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.priorityClassName=name' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "name" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "meshGateway/Deployment: no nodeSelector by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "meshGateway/Deployment: can set a nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.nodeSelector=key: value' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector.key' | tee /dev/stderr) + + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "meshGateway/Deployment: sets TLS flags when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8502' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "meshGateway/Deployment: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/client-snapshot-agent-deployment.yaml \ + -x templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} diff --git a/consul-helm/test/unit/mesh-gateway-podsecuritypolicy.bats b/consul-helm/test/unit/mesh-gateway-podsecuritypolicy.bats new file mode 100644 index 0000000..2fb55cf --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-podsecuritypolicy.bats @@ -0,0 +1,25 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/PodSecurityPolicy: enabled with meshGateway, connectInject and client.grpc enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-podsecuritypolicy.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/mesh-gateway-service.bats b/consul-helm/test/unit/mesh-gateway-service.bats new file mode 100755 index 0000000..ac8a7c3 --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-service.bats @@ -0,0 +1,204 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/Service: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/Service: disabled by default with meshGateway, connectInject and client.grpc enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/Service: enabled with meshGateway.enabled=true meshGateway.service.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "meshGateway/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "meshGateway/Service: can set annotations" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.service.annotations=key: value' \ + . 
| tee /dev/stderr | + yq -r '.metadata.annotations.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# port + +@test "meshGateway/Service: has default port" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[0].port' | tee /dev/stderr) + [ "${actual}" = "443" ] +} + +@test "meshGateway/Service: can set port" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.service.port=8443' \ + . | tee /dev/stderr | + yq -r '.spec.ports[0].port' | tee /dev/stderr) + [ "${actual}" = "8443" ] +} + +#-------------------------------------------------------------------- +# targetPort + +@test "meshGateway/Service: has default targetPort" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) + [ "${actual}" = "443" ] +} + +@test "meshGateway/Service: uses targetPort from containerPort" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.containerPort=8443' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) + [ "${actual}" = "8443" ] +} + +#-------------------------------------------------------------------- +# nodePort + +@test "meshGateway/Service: no nodePort by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "meshGateway/Service: can set a nodePort" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.service.nodePort=8443' \ + . | tee /dev/stderr | + yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) + [ "${actual}" = "8443" ] +} + +#-------------------------------------------------------------------- +# Service type + +@test "meshGateway/Service: defaults to type ClusterIP" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "ClusterIP" ] +} + +@test "meshGateway/Service: can set type" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.service.type=LoadBalancer' \ + . 
| tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "LoadBalancer" ] +} + +#-------------------------------------------------------------------- +# additionalSpec + +@test "meshGateway/Service: can add additionalSpec" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-service.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + --set 'meshGateway.service.enabled=true' \ + --set 'meshGateway.service.additionalSpec=key: value' \ + . | tee /dev/stderr | + yq -r '.spec.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} diff --git a/consul-helm/test/unit/mesh-gateway-serviceaccount.bats b/consul-helm/test/unit/mesh-gateway-serviceaccount.bats new file mode 100644 index 0000000..5eaf647 --- /dev/null +++ b/consul-helm/test/unit/mesh-gateway-serviceaccount.bats @@ -0,0 +1,25 @@ +#!/usr/bin/env bats + +load _helpers + +@test "meshGateway/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "meshGateway/ServiceAccount: enabled with meshGateway, connectInject and client.grpc enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/mesh-gateway-serviceaccount.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + diff --git a/consul-helm/test/unit/server-acl-init-cleanup-clusterrole.bats b/consul-helm/test/unit/server-acl-init-cleanup-clusterrole.bats new file mode 100644 index 0000000..badd1a4 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-cleanup-clusterrole.bats @@ -0,0 +1,58 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInitCleanup/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ClusterRole: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInitCleanup/ClusterRole: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ClusterRole: enabled with client=true and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "serverACLInitCleanup/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} diff --git a/consul-helm/test/unit/server-acl-init-cleanup-clusterrolebinding.bats b/consul-helm/test/unit/server-acl-init-cleanup-clusterrolebinding.bats new file mode 100644 index 0000000..2f16966 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-cleanup-clusterrolebinding.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInitCleanup/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ClusterRoleBinding: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInitCleanup/ClusterRoleBinding: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ClusterRoleBinding: enabled with client=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-cleanup-job.bats b/consul-helm/test/unit/server-acl-init-cleanup-job.bats new file mode 100644 index 0000000..56dbee2 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-cleanup-job.bats @@ -0,0 +1,65 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInitCleanup/Job: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/Job: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInitCleanup/Job: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/Job: enabled with client=true and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInitCleanup/Job: disabled when server.updatePartition > 0" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.updatePartition=1' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/Job: consul-k8s delete-completed-job is called with correct arguments" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -c '.spec.template.spec.containers[0].args' | tee /dev/stderr) + [ "${actual}" = '["delete-completed-job","-k8s-namespace=default","release-name-consul-server-acl-init"]' ] +} diff --git a/consul-helm/test/unit/server-acl-init-cleanup-podsecuritypolicy.bats b/consul-helm/test/unit/server-acl-init-cleanup-podsecuritypolicy.bats new file mode 100644 index 0000000..a19bff5 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-cleanup-podsecuritypolicy.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInitCleanup/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/PodSecurityPolicy: disabled with global.bootstrapACLs=true and global.enablePodSecurityPolicies=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-podsecuritypolicy.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/PodSecurityPolicy: enabled with global.bootstrapACLs=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-podsecuritypolicy.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-cleanup-serviceaccount.bats b/consul-helm/test/unit/server-acl-init-cleanup-serviceaccount.bats new file mode 100644 index 0000000..0c1d1d9 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-cleanup-serviceaccount.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInitCleanup/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ServiceAccount: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInitCleanup/ServiceAccount: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInitCleanup/ServiceAccount: enabled with client=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-cleanup-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-clusterrole.bats b/consul-helm/test/unit/server-acl-init-clusterrole.bats new file mode 100644 index 0000000..f63f60f --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-clusterrole.bats @@ -0,0 +1,72 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInit/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ClusterRole: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/ClusterRole: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ClusterRole: enabled with client=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# connectInject.enabled + +@test "serverACLInit/ClusterRole: allows service accounts when connectInject.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.rules | map(select(.resources[0] == "serviceaccounts")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "serverACLInit/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrole.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} diff --git a/consul-helm/test/unit/server-acl-init-clusterrolebinding.bats b/consul-helm/test/unit/server-acl-init-clusterrolebinding.bats new file mode 100644 index 0000000..8ccdc22 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-clusterrolebinding.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInit/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ClusterRoleBinding: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/ClusterRoleBinding: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ClusterRoleBinding: enabled with client=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-clusterrolebinding.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-job.bats b/consul-helm/test/unit/server-acl-init-job.bats new file mode 100644 index 0000000..29150d4 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-job.bats @@ -0,0 +1,660 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInit/Job: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: enabled with client=false global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: disabled when server.updatePartition > 0" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.updatePartition=1' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: does not set -create-client-token=false when client is enabled (the default)" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command[2] | contains("-create-client-token=false")' | + tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: sets -create-client-token=false when client is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command[2] | contains("-create-client-token=false")' | + tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# dns + +@test "serverACLInit/Job: dns acl option enabled with .dns.enabled=-" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: dns acl option enabled with .dns.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'dns.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: dns acl option disabled with .dns.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'dns.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# aclBindingRuleSelector/global.bootstrapACLs + +@test "serverACLInit/Job: no acl-binding-rule-selector flag by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'connectInject.aclBindingRuleSlector=foo' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: can specify acl-binding-rule-selector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + --set 'connectInject.aclBindingRuleSelector="foo"' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-acl-binding-rule-selector=\"foo\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# enterpriseLicense + +@test "serverACLInit/Job: ent license acl option enabled with server.enterpriseLicense.secretName and server.enterpriseLicense.secretKey set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enterpriseLicense.secretName=foo' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-enterprise-license-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: ent license acl option disabled missing server.enterpriseLicense.secretName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-enterprise-license-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: ent license acl option disabled missing server.enterpriseLicense.secretKey" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enterpriseLicense.secretName=foo' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-enterprise-license-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# client.snapshotAgent + +@test "serverACLInit/Job: snapshot agent acl option disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-snapshot-agent-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: snapshot agent acl option enabled with .client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-snapshot-agent-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: mesh gateway acl option enabled with .meshGateway.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-create-mesh-gateway-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "serverACLInit/Job: sets TLS flags when global.tls.enabled" { + cd `chart_dir` + local command=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-consul-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/Job: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# namespaces + +@test "serverACLInit/Job: namespace options disabled by default" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# namespaces + sync + +@test "serverACLInit/Job: sync namespace options not set with namespaces enabled, sync disabled" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8S=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: sync namespace options set with .global.enableConsulNamespaces=true and sync enabled" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: sync mirroring options set with .syncCatalog.consulNamespaces.mirroringK8S=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8S=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: sync prefix can be set with .syncCatalog.consulNamespaces.mirroringK8SPrefix" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8S=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix=k8s-"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# namespaces + inject + +@test "serverACLInit/Job: inject namespace options not set with namespaces enabled, inject disabled" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: inject namespace options set with .global.enableConsulNamespaces=true and inject enabled" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: inject mirroring options set with .connectInject.consulNamespaces.mirroringK8S=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: inject prefix can be set with .connectInject.consulNamespaces.mirroringK8SPrefix" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-acl-init-job.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-sync-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-sync-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("sync-k8s-namespace-mirroring-prefix=k8s-"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("create-inject-namespace-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-podsecuritypolicy.bats b/consul-helm/test/unit/server-acl-init-podsecuritypolicy.bats new file mode 100644 index 0000000..aee0cb5 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-podsecuritypolicy.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInit/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-podsecuritypolicy.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/PodSecurityPolicy: disabled with global.bootstrapACLs=true and global.enablePodSecurityPolicies=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-podsecuritypolicy.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/PodSecurityPolicy: enabled with global.bootstrapACLs=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-podsecuritypolicy.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-acl-init-serviceaccount.bats b/consul-helm/test/unit/server-acl-init-serviceaccount.bats new file mode 100644 index 0000000..758e0a3 --- /dev/null +++ b/consul-helm/test/unit/server-acl-init-serviceaccount.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "serverACLInit/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ServiceAccount: enabled with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "serverACLInit/ServiceAccount: disabled with server=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/ServiceAccount: enabled with client=false and global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-acl-init-serviceaccount.yaml \ + --set 'global.bootstrapACLs=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-clusterrole.bats b/consul-helm/test/unit/server-clusterrole.bats new file mode 100644 index 0000000..88f005b --- /dev/null +++ b/consul-helm/test/unit/server-clusterrole.bats @@ -0,0 +1,78 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/ClusterRole: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ClusterRole: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ClusterRole: can be enabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ClusterRole: disabled with server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ClusterRole: enabled with server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +# The rules key must always be set (#178). +@test "server/ClusterRole: rules empty with server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq '.rules' | tee /dev/stderr) + [ "${actual}" = "[]" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "server/ClusterRole: podsecuritypolicies are added when global.enablePodSecurityPolicies is true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrole.yaml \ + --set 'server.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} diff --git a/consul-helm/test/unit/server-clusterrolebinding.bats b/consul-helm/test/unit/server-clusterrolebinding.bats new file mode 100644 index 0000000..125b767 --- /dev/null +++ b/consul-helm/test/unit/server-clusterrolebinding.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/ClusterRoleBinding: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrolebinding.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ClusterRoleBinding: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ClusterRoleBinding: disabled with server disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrolebinding.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ClusterRoleBinding: enabled with server enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrolebinding.yaml \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ClusterRoleBinding: enabled with server enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} \ No newline at end of file diff --git a/consul-helm/test/unit/server-configmap.bats b/consul-helm/test/unit/server-configmap.bats new file mode 100755 index 0000000..e4a26fe --- /dev/null +++ b/consul-helm/test/unit/server-configmap.bats @@ -0,0 +1,167 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/ConfigMap: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ConfigMap: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ConfigMap: disable with server.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ConfigMap: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ConfigMap: extraConfig is set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'server.extraConfig="{\"hello\": \"world\"}"' \ + . | tee /dev/stderr | + yq '.data["extra-from-values.json"] | match("world") | length' | tee /dev/stderr) + [ ! -z "${actual}" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "server/ConfigMap: creates acl config with .global.bootstrapACLs enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.data["acl-config.json"] | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# connectInject.centralConfig + +@test "server/ConfigMap: centralConfig is enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.data["central-config.json"] | contains("enable_central_service_config")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ConfigMap: centralConfig can be disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=false' \ + . | tee /dev/stderr | + yq '.data["central-config.json"] | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ConfigMap: proxyDefaults disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq '.data["proxy-defaults-config.json"] | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ConfigMap: proxyDefaults can be enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"hello\": \"world\"}"' \ + . | tee /dev/stderr | + yq '.data["proxy-defaults-config.json"] | match("world") | length' | tee /dev/stderr) + [ ! 
-z "${actual}" ] +} + +@test "server/ConfigMap: proxyDefaults and meshGateways can be enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"hello\": \"world\"}"' \ + --set 'meshGateway.enabled=true' \ + --set 'meshGateway.globalMode=remote' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.data["proxy-defaults-config.json"]' | yq -r '.config_entries.bootstrap[0].mesh_gateway.mode' | tee /dev/stderr) + [ "${actual}" = "remote" ] +} + +@test "server/ConfigMap: proxyDefaults should have no gateway mode if set to empty string" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"hello\": \"world\"}"' \ + --set 'meshGateway.enabled=true' \ + --set 'meshGateway.globalMode=' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.data["proxy-defaults-config.json"]' | yq '.config_entries.bootstrap[0].mesh_gateway' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/ConfigMap: proxyDefaults should have no gateway mode if set to null" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"hello\": \"world\"}"' \ + --set 'meshGateway.enabled=true' \ + --set 'meshGateway.globalMode=null' \ + --set 'client.grpc=true' \ + . 
| tee /dev/stderr | + yq -r '.data["proxy-defaults-config.json"]' | yq '.config_entries.bootstrap[0].mesh_gateway' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/ConfigMap: global gateway mode is set even if there are no proxyDefaults" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-config-configmap.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults=""' \ + --set 'meshGateway.enabled=true' \ + --set 'meshGateway.globalMode=remote' \ + --set 'client.grpc=true' \ + . | tee /dev/stderr | + yq -r '.data["proxy-defaults-config.json"]' | yq -r '.config_entries.bootstrap[0].mesh_gateway.mode' | tee /dev/stderr) + [ "${actual}" = "remote" ] +} diff --git a/consul-helm/test/unit/server-disruptionbudget.bats b/consul-helm/test/unit/server-disruptionbudget.bats new file mode 100755 index 0000000..b9b543b --- /dev/null +++ b/consul-helm/test/unit/server-disruptionbudget.bats @@ -0,0 +1,127 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/DisruptionBudget: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/DisruptionBudget: enabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/DisruptionBudget: disabled with server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/DisruptionBudget: disabled with server.disruptionBudget.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.disruptionBudget.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/DisruptionBudget: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# maxUnavailable + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=1" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=1' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=3" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=3' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=4" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=4' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=5" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=5' \ + . 
| tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=6" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=6' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=7" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=7' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +@test "server/DisruptionBudget: correct maxUnavailable with replicas=8" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-disruptionbudget.yaml \ + --set 'server.replicas=8' \ + . | tee /dev/stderr | + yq '.spec.maxUnavailable' | tee /dev/stderr) + [ "${actual}" = "3" ] +} diff --git a/consul-helm/test/unit/server-podsecuritypolicy.bats b/consul-helm/test/unit/server-podsecuritypolicy.bats new file mode 100644 index 0000000..91e010d --- /dev/null +++ b/consul-helm/test/unit/server-podsecuritypolicy.bats @@ -0,0 +1,33 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/PodSecurityPolicy: disabled with server disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-podsecuritypolicy.yaml \ + --set 'server.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/PodSecurityPolicy: enabled with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-podsecuritypolicy.yaml \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-service.bats b/consul-helm/test/unit/server-service.bats new file mode 100755 index 0000000..d1c180e --- /dev/null +++ b/consul-helm/test/unit/server-service.bats @@ -0,0 +1,105 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/Service: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/Service: disable with server.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/Service: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +# This can be seen as testing just what we put into the YAML raw, but +# this is such an important part of making everything work we verify it here. 
+@test "server/Service: tolerates unready endpoints" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/server-service.yaml \ + . | tee /dev/stderr | + yq -r '.spec.publishNotReadyAddresses' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "server/Service: no HTTPS listener when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .port' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "server/Service: HTTPS listener set when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .port' | tee /dev/stderr) + [ "${actual}" == "8501" ] +} + +@test "server/Service: HTTP listener still active when httpsOnly is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=false' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "http") | .port' | tee /dev/stderr) + [ "${actual}" == "8500" ] +} + +@test "server/Service: no HTTP listener when httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-service.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "http") | .port' | tee /dev/stderr) + [ "${actual}" == "" ] +} diff --git a/consul-helm/test/unit/server-serviceaccount.bats b/consul-helm/test/unit/server-serviceaccount.bats new file mode 100644 index 0000000..bf1a742 --- /dev/null +++ b/consul-helm/test/unit/server-serviceaccount.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/ServiceAccount: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ServiceAccount: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-serviceaccount.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ServiceAccount: disabled with server disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-serviceaccount.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/ServiceAccount: enabled with server enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-serviceaccount.yaml \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/ServiceAccount: enabled with server enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-serviceaccount.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/server-statefulset.bats b/consul-helm/test/unit/server-statefulset.bats new file mode 100755 index 0000000..a4930e5 --- /dev/null +++ b/consul-helm/test/unit/server-statefulset.bats @@ -0,0 +1,664 @@ +#!/usr/bin/env bats + +load _helpers + +@test "server/StatefulSet: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: disable with server.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# retry-join + +@test "server/StatefulSet: retry join gets populated" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.replicas=3' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | any(contains("-retry-join"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# image + +@test "server/StatefulSet: image defaults to global.image" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.image=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +@test "server/StatefulSet: image can be overridden with server.image" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.image=foo' \ + --set 'server.image=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "server/StatefulSet: no resources defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/StatefulSet: resources can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.resources=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +#-------------------------------------------------------------------- +# updateStrategy (derived from updatePartition) + +@test "server/StatefulSet: no updateStrategy when not updating" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . 
| tee /dev/stderr | + yq -r '.spec.updateStrategy' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/StatefulSet: updateStrategy during update" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.updatePartition=2' \ + . | tee /dev/stderr | + yq -r '.spec.updateStrategy.type' | tee /dev/stderr) + [ "${actual}" = "RollingUpdate" ] + + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.updatePartition=2' \ + . | tee /dev/stderr | + yq -r '.spec.updateStrategy.rollingUpdate.partition' | tee /dev/stderr) + [ "${actual}" = "2" ] +} + +#-------------------------------------------------------------------- +# storageClass + +@test "server/StatefulSet: no storageClass on claim by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/StatefulSet: can set storageClass" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.storageClass=foo' \ + . | tee /dev/stderr | + yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +#-------------------------------------------------------------------- +# extraVolumes + +@test "server/StatefulSet: adds extra volume" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.configMap.name' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + local actual=$(echo $object | + yq -r '.configMap.secretName' | tee /dev/stderr) + [ "${actual}" = "null" ] + + # Test that it mounts it + local object=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/userconfig/foo" ] + + # Doesn't load it + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(test("userconfig"))) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "server/StatefulSet: adds extra secret volume" { + cd `chart_dir` + + # Test that it defines it + local object=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=secret' \ + --set 'server.extraVolumes[0].name=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.secret.name' | tee /dev/stderr) + [ "${actual}" = "null" ] + + local actual=$(echo $object | + yq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo" ] + + # Test that it mounts it + local object=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/userconfig/foo" ] + + # Doesn't load it + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(test("userconfig"))) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "server/StatefulSet: adds loadable volume" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraVolumes[0].type=configMap' \ + --set 'server.extraVolumes[0].name=foo' \ + --set 'server.extraVolumes[0].load=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | map(select(test("/consul/userconfig/foo"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "server/StatefulSet: affinity not set with server.affinity=null" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.affinity=null' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec | .affinity? == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: affinity set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity | .podAntiAffinity? != null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "server/StatefulSet: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/StatefulSet: specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "server/StatefulSet: priorityClassName is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.priorityClassName' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "server/StatefulSet: specified priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.priorityClassName=testing' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "server/StatefulSet: no annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "server/StatefulSet: annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.annotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "server/StatefulSet: tolerations not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: tolerations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.tolerations=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# gossip encryption + +@test "server/StatefulSet: gossip encryption disabled in server StatefulSet by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "server/StatefulSet: gossip encryption disabled in server StatefulSet when secretName is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.gossipEncryption.secretKey=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "server/StatefulSet: gossip encryption disabled in server StatefulSet when secretKey is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.gossipEncryption.secretName=foo' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "server/StatefulSet: gossip environment variable present in server StatefulSet when all config is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.gossipEncryption.secretKey=foo' \ + --set 'global.gossipEncryption.secretName=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: encrypt CLI option not present in server StatefulSet when encryption disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: encrypt CLI option present in server StatefulSet when all config is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.gossipEncryption.secretKey=foo' \ + --set 'global.gossipEncryption.secretName=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# extraEnvironmentVariables + +@test "server/StatefulSet: custom environment variables" { + cd `chart_dir` + local object=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'server.extraEnvironmentVars.custom_proxy=fakeproxy' \ + --set 'server.extraEnvironmentVars.no_proxy=custom_no_proxy' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.[2].name' | tee /dev/stderr) + [ "${actual}" = "custom_proxy" ] + + local actual=$(echo $object | + yq -r '.[2].value' | tee /dev/stderr) + [ "${actual}" = "fakeproxy" ] + + local actual=$(echo $object | + yq -r '.[3].name' | tee /dev/stderr) + [ "${actual}" = "no_proxy" ] + + local actual=$(echo $object | + yq -r '.[3].value' | tee /dev/stderr) + [ "${actual}" = "custom_no_proxy" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "server/StatefulSet: CA volume present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: server volume present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "tls-server-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: CA volume mounted when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: server certificate volume mounted when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "tls-server-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: port 8501 is not exposed when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "server/StatefulSet: port 8501 is exposed when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: port 8500 is still exposed when httpsOnly is not enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8500)' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "server/StatefulSet: port 8500 is not exposed when httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8500)' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "server/StatefulSet: readiness checks are over HTTP when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("http://127.0.0.1:8500")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: readiness checks are over HTTPS when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("https://127.0.0.1:8501")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: CA certificate is specified when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("--cacert /consul/tls/ca/tls.crt")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: HTTP is disabled in agent when httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ") | contains("ports { http = -1 }")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: sets Consul environment variables when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://localhost:8501" ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "server/StatefulSet: sets verify_* flags to true by default when global.tls.enabled" { + cd `chart_dir` + local command=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | contains("verify_incoming_rpc = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | contains("verify_outgoing = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. 
| contains("verify_server_hostname = true")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "server/StatefulSet: doesn't set the verify_* flags by default when global.tls.enabled and global.tls.verify is false" { + cd `chart_dir` + local command=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.verify=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | contains("verify_incoming_rpc = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] + + actual=$(echo $command | jq -r '. | contains("verify_outgoing = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] + + actual=$(echo $command | jq -r '. | contains("verify_server_hostname = true")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/server-statefulset.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} \ No newline at end of file diff --git a/consul-helm/test/unit/sync-catalog-clusterrole.bats b/consul-helm/test/unit/sync-catalog-clusterrole.bats new file mode 100755 index 0000000..61932f3 --- /dev/null +++ b/consul-helm/test/unit/sync-catalog-clusterrole.bats @@ -0,0 +1,106 @@ +#!/usr/bin/env bats + +load _helpers + +@test "syncCatalog/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRole: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRole: disabled with sync disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRole: enabled with sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/ClusterRole: enabled with sync enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'global.enabled=false' \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "syncCatalog/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[2].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "syncCatalog/ClusterRole: allows secret access with global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq -r '.rules[2].resources[0]' | tee /dev/stderr) + [ "${actual}" = "secrets" ] +} + +#-------------------------------------------------------------------- +# syncCatalog.toK8S={true,false} + +@test "syncCatalog/ClusterRole: has reduced permissions if toK8s=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toK8S=false' \ + . 
| tee /dev/stderr | + yq -c '.rules[0].verbs' | tee /dev/stderr) + [ "${actual}" = '["get","list","watch"]' ] +} + +@test "syncCatalog/ClusterRole: has full permissions if toK8s=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrole.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toK8S=true' \ + . | tee /dev/stderr | + yq -c '.rules[0].verbs' | tee /dev/stderr) + [ "${actual}" = '["get","list","watch","update","patch","delete","create"]' ] +} diff --git a/consul-helm/test/unit/sync-catalog-clusterrolebinding.bats b/consul-helm/test/unit/sync-catalog-clusterrolebinding.bats new file mode 100755 index 0000000..36935d3 --- /dev/null +++ b/consul-helm/test/unit/sync-catalog-clusterrolebinding.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "syncCatalog/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRoleBinding: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRoleBinding: disabled with sync disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrolebinding.yaml \ + --set 'syncCatalog.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ClusterRoleBinding: enabled with sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrolebinding.yaml \ + --set 'syncCatalog.enabled=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/ClusterRoleBinding: enabled with sync enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-clusterrolebinding.yaml \ + --set 'global.enabled=false' \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/sync-catalog-deployment.bats b/consul-helm/test/unit/sync-catalog-deployment.bats new file mode 100755 index 0000000..e486cea --- /dev/null +++ b/consul-helm/test/unit/sync-catalog-deployment.bats @@ -0,0 +1,630 @@ +#!/usr/bin/env bats + +load _helpers + +@test "syncCatalog/Deployment: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: disable with syncCatalog.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'global.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# image + +@test "syncCatalog/Deployment: image defaults to global.imageK8S" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'global.imageK8S=bar' \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalog/Deployment: image can be overridden with server.image" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'global.imageK8S=foo' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.image=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# default sync + +@test "syncCatalog/Deployment: default sync is true by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | any(contains("-k8s-default-sync=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: default sync can be turned off" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.default=false' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command | any(contains("-k8s-default-sync=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# toConsul and toK8S + +@test "syncCatalog/Deployment: bidirectional by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-consul"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-k8s"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: to-k8s only" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toConsul=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-consul=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toConsul=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-k8s"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: to-consul only" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toK8S=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-k8s=false"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.toK8S=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-to-consul"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# k8sPrefix + +@test "syncCatalog/Deployment: no k8sPrefix by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-k8s-service-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: can specify k8sPrefix" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.k8sPrefix=foo-' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-k8s-service-prefix=\"foo-\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# consulPrefix + +@test "syncCatalog/Deployment: no consulPrefix by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-service-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: can specify consulPrefix" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.consulPrefix=foo-' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-service-prefix=\"foo-\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# k8sTag + +@test "syncCatalog/Deployment: no k8sTag flag by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-k8s-tag"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: can specify k8sTag" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.k8sTag=clusterB' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-k8s-tag=clusterB"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# serviceAccount + +@test "syncCatalog/Deployment: serviceAccount set when sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.serviceAccountName | contains("sync-catalog")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodePortSyncType + +@test "syncCatalog/Deployment: nodePortSyncType defaults to ExternalFirst" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-node-port-sync-type=ExternalFirst"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: can set nodePortSyncType to InternalOnly" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.nodePortSyncType=InternalOnly' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-node-port-sync-type=InternalOnly"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: can set nodePortSyncType to ExternalOnly" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.nodePortSyncType=ExternalOnly' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-node-port-sync-type=ExternalOnly"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# aclSyncToken + +@test "syncCatalog/Deployment: aclSyncToken disabled when secretName is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: aclSyncToken disabled when secretKey is missing" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: aclSyncToken enabled when secretName and secretKey is provided" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "syncCatalog/Deployment: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalog/Deployment: nodeSelector is not set by default with sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalog/Deployment: specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.nodeSelector=testing' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# global.bootstrapACLs + +@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN env variable created when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.bootstrapACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.bootstrapACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "sync-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# addK8SNamespaceSuffix + +@test "syncCatalog/Deployment: k8s namespace suffix enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-add-k8s-namespace-suffix"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: can set addK8SNamespaceSuffix to false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.addK8SNamespaceSuffix=false' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-add-k8s-namespace-suffix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "syncCatalog/Deployment: sets Consul environment variables when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "syncCatalog/Deployment: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# k8sAllowNamespaces & k8sDenyNamespaces + +@test "syncCatalog/Deployment: default is allow `*`, deny kube-system and kube-public" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'map(select(test("allow-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'any(contains("allow-k8s-namespace=\"*\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("deny-k8s-namespace=\"kube-system\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("deny-k8s-namespace=\"kube-public\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: can set allow and deny namespaces" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.k8sAllowNamespaces[0]=allowNamespace' \ + --set 'syncCatalog.k8sDenyNamespaces[0]=denyNamespace' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'map(select(test("allow-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'map(select(test("deny-k8s-namespace"))) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] + + local actual=$(echo $object | + yq 'any(contains("allow-k8s-namespace=\"allowNamespace\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("deny-k8s-namespace=\"denyNamespace\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + +@test "syncCatalog/Deployment: namespace options disabled by default" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: namespace options set with .global.enableConsulNamespaces=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: mirroring options set with .syncCatalog.consulNamespaces.mirroringK8S=true" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8S=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: prefix can be set with .syncCatalog.consulNamespaces.mirroringK8SPrefix" { + cd `chart_dir` + local object=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'syncCatalog.consulNamespaces.mirroringK8S=true' \ + --set 
'syncCatalog.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix=k8s-"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# namespaces + global.bootstrapACLs + +@test "syncCatalog/Deployment: cross namespace policy is not added when global.bootstrapACLs=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/Deployment: cross namespace policy is added when global.bootstrapACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.bootstrapACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/sync-catalog-podsecuritypolicy.bats b/consul-helm/test/unit/sync-catalog-podsecuritypolicy.bats new file mode 100644 index 0000000..0a0adda --- /dev/null +++ b/consul-helm/test/unit/sync-catalog-podsecuritypolicy.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "syncCatalog/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/PodSecurityPolicy: disabled by default with syncCatalog enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-podsecuritypolicy.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/PodSecurityPolicy: disabled with syncCatalog disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-podsecuritypolicy.yaml \ + --set 'syncCatalog.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/PodSecurityPolicy: enabled with syncCatalog enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-podsecuritypolicy.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/sync-catalog-serviceaccount.bats b/consul-helm/test/unit/sync-catalog-serviceaccount.bats new file mode 100755 index 0000000..f4af230 --- /dev/null +++ b/consul-helm/test/unit/sync-catalog-serviceaccount.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats + +load _helpers + +@test "syncCatalog/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ServiceAccount: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-serviceaccount.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ServiceAccount: disabled with sync disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-serviceaccount.yaml \ + --set 'syncCatalog.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalog/ServiceAccount: enabled with sync enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-serviceaccount.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/ServiceAccount: enabled with sync enabled and global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/sync-catalog-serviceaccount.yaml \ + --set 'global.enabled=false' \ + --set 'syncCatalog.enabled=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/test-runner.bats b/consul-helm/test/unit/test-runner.bats new file mode 100644 index 0000000..2b1fba6 --- /dev/null +++ b/consul-helm/test/unit/test-runner.bats @@ -0,0 +1,22 @@ +#!/usr/bin/env bats + +load _helpers + +@test "testRunner/Pod: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "testRunner/Pod: disabled when tests.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tests/test-runner.yaml \ + --set 'tests.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} diff --git a/consul-helm/test/unit/tls-init-cleanup-clusterrole.bats b/consul-helm/test/unit/tls-init-cleanup-clusterrole.bats new file mode 100644 index 0000000..0357287 --- /dev/null +++ b/consul-helm/test/unit/tls-init-cleanup-clusterrole.bats @@ -0,0 +1,67 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInitCleanup/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRole: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRole: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRole: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInitCleanup/ClusterRole: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInitCleanup/ClusterRole: adds pod security polices with global.tls.enabled and global.enablePodSecurityPolicies" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -r '.rules[] | select(.resources==["podsecuritypolicies"]) | .resourceNames[0]' | tee /dev/stderr) + + [ "${actual}" = "release-name-consul-tls-init-cleanup" ] +} diff --git a/consul-helm/test/unit/tls-init-cleanup-clusterrolebinding.bats b/consul-helm/test/unit/tls-init-cleanup-clusterrolebinding.bats new file mode 100644 index 0000000..d8d414e --- /dev/null +++ b/consul-helm/test/unit/tls-init-cleanup-clusterrolebinding.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInitCleanup/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrolebinding.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRoleBinding: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRoleBinding: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInitCleanup/ClusterRoleBinding: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ClusterRoleBinding: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-cleanup-job.bats b/consul-helm/test/unit/tls-init-cleanup-job.bats new file mode 100644 index 0000000..adf89a8 --- /dev/null +++ b/consul-helm/test/unit/tls-init-cleanup-job.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInitCleanup/Job: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-job.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/Job: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/Job: enabled with global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-job.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInitCleanup/Job: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/Job: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-cleanup-podsecuritypolicy.bats b/consul-helm/test/unit/tls-init-cleanup-podsecuritypolicy.bats new file mode 100644 index 0000000..a5be6d7 --- /dev/null +++ b/consul-helm/test/unit/tls-init-cleanup-podsecuritypolicy.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInitCleanup/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-podsecuritypolicy.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/PodSecurityPolicy: disabled by default with TLS enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/PodSecurityPolicy: disabled with TLS disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/PodSecurityPolicy: enabled with TLS enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-cleanup-serviceaccount.bats b/consul-helm/test/unit/tls-init-cleanup-serviceaccount.bats new file mode 100644 index 0000000..3d5aef8 --- /dev/null +++ b/consul-helm/test/unit/tls-init-cleanup-serviceaccount.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInitCleanup/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-serviceaccount.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ServiceAccount: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ServiceAccount: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInitCleanup/ServiceAccount: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInitCleanup/ServiceAccount: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-cleanup-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-clusterrole.bats b/consul-helm/test/unit/tls-init-clusterrole.bats new file mode 100644 index 0000000..b842e97 --- /dev/null +++ b/consul-helm/test/unit/tls-init-clusterrole.bats @@ -0,0 +1,67 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInit/ClusterRole: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRole: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRole: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRole: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/ClusterRole: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/ClusterRole: adds pod security polices with global.tls.enabled and global.enablePodSecurityPolicies" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrole.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[] | select(.resources==["podsecuritypolicies"]) | .resourceNames[0]' | tee /dev/stderr) + + [ "${actual}" = "release-name-consul-tls-init" ] +} diff --git a/consul-helm/test/unit/tls-init-clusterrolebinding.bats b/consul-helm/test/unit/tls-init-clusterrolebinding.bats new file mode 100644 index 0000000..b9e5478 --- /dev/null +++ b/consul-helm/test/unit/tls-init-clusterrolebinding.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInit/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRoleBinding: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRoleBinding: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/ClusterRoleBinding: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ClusterRoleBinding: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-clusterrolebinding.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-job.bats b/consul-helm/test/unit/tls-init-job.bats new file mode 100644 index 0000000..23fae59 --- /dev/null +++ b/consul-helm/test/unit/tls-init-job.bats @@ -0,0 +1,112 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInit/Job: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/Job: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/Job: enabled with global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/Job: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/Job: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/Job: sets additional IP SANs when provided and global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.serverAdditionalIPSANs[0]=1.1.1.1' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-additional-ipaddress=1.1.1.1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/Job: sets additional DNS SANs when provided and global.tls.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.serverAdditionalDNSSANs[0]=example.com' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-additional-dnsname=example.com"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/Job: can overwrite CA secret with the provided one" { + cd `chart_dir` + local spec=$(helm template \ + -x templates/tls-init-job.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-cert") | .secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # uses the provided secret key for CA cert + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-cert") | .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] + + # check that the provided ca key secret is attached as a volume + local actual + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-key") | .secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-key" ] + + # uses the provided secret key for CA cert + actual=$(echo $spec | jq -r '.volumes[] | select(.name=="consul-ca-key") | .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] + + # check that it doesn't generate the CA + actual=$(echo $spec | jq -r '.containers[0].command | join(" ") | contains("consul tls ca create")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} diff --git a/consul-helm/test/unit/tls-init-podsecuritypolicy.bats b/consul-helm/test/unit/tls-init-podsecuritypolicy.bats new file mode 100644 index 0000000..3554e48 --- /dev/null +++ b/consul-helm/test/unit/tls-init-podsecuritypolicy.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInit/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-podsecuritypolicy.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/PodSecurityPolicy: disabled by default with TLS enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/PodSecurityPolicy: disabled with TLS disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/PodSecurityPolicy: enabled with TLS enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-podsecuritypolicy.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/tls-init-serviceaccount.bats b/consul-helm/test/unit/tls-init-serviceaccount.bats new file mode 100644 index 0000000..2ed13a4 --- /dev/null +++ b/consul-helm/test/unit/tls-init-serviceaccount.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "tlsInit/ServiceAccount: disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ServiceAccount: disabled with global.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ServiceAccount: enabled with global.tls.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "tlsInit/ServiceAccount: disabled when server.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "tlsInit/ServiceAccount: enabled when global.tls.enabled=true and server.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/tls-init-serviceaccount.yaml \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/consul-helm/test/unit/ui-service.bats b/consul-helm/test/unit/ui-service.bats new file mode 100755 index 0000000..c86b067 --- /dev/null +++ b/consul-helm/test/unit/ui-service.bats @@ -0,0 +1,183 @@ +#!/usr/bin/env bats + +load _helpers + +@test "ui/Service: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ui/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'ui.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ui/Service: disable with server.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'server.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ui/Service: disable with ui.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.enabled=false' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ui/Service: disable with ui.service.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.service.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ui/Service: disable with global.enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.enabled=false' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ui/Service: disable with global.enabled and server.enabled on" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ui/Service: no type by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "ui/Service: specified type" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.service.type=LoadBalancer' \ + . | tee /dev/stderr | + yq -r '.spec.type' | tee /dev/stderr) + [ "${actual}" = "LoadBalancer" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "ui/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.annotations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "ui/Service: annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.service.annotations=foo: bar' \ + . 
| tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# additionalSpec + +@test "ui/Service: no additionalSpec by default" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + . | tee /dev/stderr | + yq -r '.spec.loadBalancerIP' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "ui/Service: additionalSpec can be set" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'ui.service.additionalSpec=loadBalancerIP: 1.2.3.4' \ + . | tee /dev/stderr | + yq -r '.spec.loadBalancerIP' | tee /dev/stderr) + [ "${actual}" = "1.2.3.4" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "ui/Service: no HTTPS listener when TLS is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.tls.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .port' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "ui/Service: HTTPS listener set when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .port' | tee /dev/stderr) + [ "${actual}" == "443" ] +} + +@test "ui/Service: HTTP listener still active when httpsOnly is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=false' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "http") | .port' | tee /dev/stderr) + [ "${actual}" == "80" ] +} + +@test "ui/Service: no HTTP listener when httpsOnly is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -x templates/ui-service.yaml \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.httpsOnly=true' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "http") | .port' | tee /dev/stderr) + [ "${actual}" == "" ] +} diff --git a/consul-helm/values.yaml b/consul-helm/values.yaml new file mode 100644 index 0000000..7001214 --- /dev/null +++ b/consul-helm/values.yaml @@ -0,0 +1,862 @@ +# Available parameters and their default values for the Consul chart. + +# global holds values that affect multiple components of the chart. +global: + # enabled is the master enabled/disabled setting. + # If true, servers, clients, Consul DNS and the Consul UI will be enabled. + # Each component can override this default via its component-specific + # "enabled" config. + # If false, no components will be installed by default and per-component + # opt-in is required, such as by setting `server.enabled` to true. + enabled: true + + # name sets the prefix used for all resources in the helm chart. + # If not set, the prefix will be "-consul". + name: null + + # domain is the domain Consul will answer DNS queries for + # (see https://www.consul.io/docs/agent/options.html#_domain) and the domain + # services synced from Consul into Kubernetes will have, + # e.g. `service-name.service.consul`. + domain: consul + + # image is the name (and tag) of the Consul Docker image for clients and + # servers. This can be overridden per component. + # This should be pinned to a specific version tag, otherwise you may + # inadvertently upgrade your Consul version. 
+ # + # Examples: + # # Consul 1.5.0 + # image: "consul:1.5.0" + # # Consul Enterprise 1.5.0 + # image: "hashicorp/consul-enterprise:1.5.0-ent" + image: "consul:1.7.1" + + # imageK8S is the name (and tag) of the consul-k8s Docker image that + # is used for functionality such as catalog sync. This can be overridden + # per component. + # Note: support for the catalog sync's liveness and readiness probes was added + # to consul-k8s 0.6.0. If using an older consul-k8s version, you may need to + # remove these checks to make the sync work. + # If using bootstrapACLs then must be >= 0.10.1. + # If using connect inject then must be >= 0.10.1. + # If using Consul Enterprise namespaces, must be >= 0.12. + imageK8S: "hashicorp/consul-k8s:0.12.0" + + # datacenter is the name of the datacenter that the agents should register + # as. This can't be changed once the Consul cluster is up and running + # since Consul doesn't support an automatic way to change this value + # currently: https://github.com/hashicorp/consul/issues/1858. + datacenter: dc1 + + # enablePodSecurityPolicies controls whether pod + # security policies are created for the Consul components created by this + # chart. See https://kubernetes.io/docs/concepts/policy/pod-security-policy/. + enablePodSecurityPolicies: false + + # gossipEncryption configures which Kubernetes secret to retrieve Consul's + # gossip encryption key from (see https://www.consul.io/docs/agent/options.html#_encrypt). + # If secretName or secretKey are not set, gossip encryption will not be enabled. + # The secret must be in the same namespace that Consul is installed into. + # + # The secret can be created by running: + # kubectl create secret generic consul-gossip-encryption-key \ + # --from-literal=key=$(consul keygen). + # + # In this case, secretName would be "consul-gossip-encryption-key" and + # secretKey would be "key". + gossipEncryption: + # secretName is the name of the Kubernetes secret that holds the gossip + # encryption key. 
The secret must be in the same namespace that Consul is installed into. + secretName: "" + # secretKey is the key within the Kubernetes secret that holds the gossip + # encryption key. + secretKey: "" + + # bootstrapACLs will automatically create and assign ACL tokens within + # the Consul cluster. This requires servers to be running inside Kubernetes. + # Additionally requires Consul >= 1.4 and consul-k8s >= 0.10.1. + bootstrapACLs: false + + # Enables TLS encryption across the cluster to verify authenticity of the + # servers and clients that connect. Note: It is HIGHLY recommended that you also + # enable Gossip encryption. + # See https://learn.hashicorp.com/consul/security-networking/agent-encryption + # + # Note: this relies on functionality introduced with Consul 1.4.1. Make sure + # your global.image value is at least version 1.4.1. + tls: + enabled: false + + # serverAdditionalDNSSANs is a list of additional DNS names to + # set as Subject Alternative Names (SANs) in the server certificate. + # This is useful when you need to access the Consul server(s) externally, + # for example, if you're using the UI. + serverAdditionalDNSSANs: [] + + # serverAdditionalIPSANs is a list of additional IP addresses to + # set as Subject Alternative Names (SANs) in the server certificate. + # This is useful when you need to access Consul server(s) externally, + # for example, if you're using the UI. + serverAdditionalIPSANs: [] + + # If verify is true, 'verify_outgoing', 'verify_server_hostname', and + # 'verify_incoming_rpc' will be set to true for Consul servers and clients. + # Set this to false to incrementally roll out TLS on an existing Consul cluster. + # Note: remember to switch it back to true once the rollout is complete. 
+ # Please see this guide for more details: + # https://learn.hashicorp.com/consul/security-networking/certificates + verify: true + + # If httpsOnly is true, Consul will disable the HTTP port on both + # clients and servers and only accept HTTPS connections. + httpsOnly: true + + # caCert is a Kubernetes secret containing the certificate + # of the CA to use for TLS communication within the Consul cluster. + # If you have generated the CA yourself with the consul CLI, + # you could use the following command to create the secret in Kubernetes: + # + # kubectl create secret generic consul-ca-cert \ + # --from-file='tls.crt=./consul-agent-ca.pem' + caCert: + secretName: null + secretKey: null + + # caKey is a Kubernetes secret containing the private key + # of the CA to use for TLS communications within the Consul cluster. + # If you have generated the CA yourself with the consul CLI, + # you could use the following command to create the secret in Kubernetes: + # + # kubectl create secret generic consul-ca-key \ + # --from-file='tls.key=./consul-agent-ca-key.pem' + # + # Note that we need the CA key so that we can generate server and client certificates. + # It is particularly important for the client certificates since they need to have host IPs + # as Subject Alternative Names. In the future, we may support bringing your own server + # certificates. + caKey: + secretName: null + secretKey: null + + # [Enterprise Only] enableConsulNamespaces indicates that you are running + # Consul Enterprise v1.7+ with a valid Consul Enterprise license and would like to + # make use of configuration beyond registering everything into the `default` Consul + # namespace. Requires consul-k8s v0.12+. + # Additional configuration options are found in the `consulNamespaces` section + # of both the catalog sync and connect injector. + enableConsulNamespaces: false + +# Server, when enabled, configures a server cluster to run. 
This should +# be disabled if you plan on connecting to a Consul cluster external to +# the Kube cluster. +server: + enabled: "-" + image: null + replicas: 3 + bootstrapExpect: 3 # Should <= replicas count + + # enterpriseLicense refers to a Kubernetes secret that you have created that + # contains your enterprise license. It is required if you are using an + # enterprise binary. Defining it here applies it to your cluster once a leader + # has been elected. If you are not using an enterprise image + # or if you plan to introduce the license key via another route, then set + # these fields to null. + enterpriseLicense: + secretName: null + secretKey: null + + # storage and storageClass are the settings for configuring stateful + # storage for the server pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 10Gi + storageClass: null + + # connect will enable Connect on all the servers, initializing a CA + # for Connect-related connections. Other customizations can be done + # via the extraConfig setting. + connect: true + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # updatePartition is used to control a careful rolling update of Consul + # servers. This should be done particularly when changing the version + # of Consul. Please refer to the documentation for more information. + updatePartition: 0 + + # disruptionBudget enables the creation of a PodDisruptionBudget to + # prevent voluntary degrading of the Consul server cluster. + disruptionBudget: + enabled: true + + # maxUnavailable will default to (n/2)-1 where n is the number of + # replicas. If you'd like a custom value, you can specify an override here. 
+ maxUnavailable: null + + # extraConfig is a raw string of extra configuration to set with the + # server. This should be JSON. + extraConfig: | + {} + + # extraVolumes is a list of extra volumes to mount. These will be exposed + # to Consul in the path `/consul/userconfig/<name>/`. The value below is + # an array of objects, examples are shown below. + extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # load: false # if true, will add to `-config-dir` to load by Consul + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a multi-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + # Extra annotations to attach to the server pods + # This should be a multi-line string mapping directly to a map of + # the annotations to apply to the server pods + annotations: null + + # extraEnvironmentVars is a map of extra environment variables to set with the stateful set. These could be + # used to include proxy settings required for cloud auto-join feature, + # in case kubernetes cluster is behind egress http proxies. Additionally, it could be used to configure + # custom consul parameters.
+ extraEnvironmentVars: {} + # http_proxy: http://localhost:3128, + # https_proxy: http://localhost:3128, + # no_proxy: internal.domain.com + +# Client, when enabled, configures Consul clients to run on every node +# within the Kube cluster. The current deployment model follows a traditional +# DC where a single agent is deployed per node. +client: + enabled: "-" + image: null + join: null + + # dataDirectoryHostPath is an absolute path to a directory on the host machine + # to use as the Consul client data directory. + # If set to the empty string or null, the Consul agent will store its data + # in the Pod's local filesystem (which will be lost if the Pod is deleted). + # Security Warning: If setting this, Pod Security Policies *must* be enabled on your cluster + # and in this Helm chart (via the global.enablePodSecurityPolicies setting) + # to prevent other Pods from mounting the same host path and gaining + # access to all of Consul's data. Consul's data is not encrypted at rest. + dataDirectoryHostPath: null + + # If true, Consul's gRPC port will be exposed (see https://www.consul.io/docs/agent/options.html#grpc_port). + # This should be set to true if connectInject or meshGateway is enabled. + grpc: true + + # exposeGossipPorts exposes the clients' gossip ports as hostPorts. + # This is only necessary if pod IPs in the k8s cluster are not directly + # routable and the Consul servers are outside of the k8s cluster. This + # also changes the clients' advertised IP to the hostIP rather than podIP. + exposeGossipPorts: false + + # Resource requests, limits, etc. for the client cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # extraConfig is a raw string of extra configuration to set with the + # client. This should be JSON. + extraConfig: | + {} + + # extraVolumes is a list of extra volumes to mount. 
These will be exposed + # to Consul in the path `/consul/userconfig/<name>/`. The value below is + # an array of objects, examples are shown below. + extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # load: false # if true, will add to `-config-dir` to load by Consul + + # Toleration Settings for Client pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + # The example below will allow Client pods to run on every node + # regardless of taints + # tolerations: | + # - operator: "Exists" + tolerations: "" + + # nodeSelector labels for client pod assignment, formatted as a multi-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # Affinity Settings for Client pods, formatted as a multi-line YAML string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # Example: + # affinity: | + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: node-role.kubernetes.io/master + # operator: DoesNotExist + affinity: {} + + # used to assign priority to client pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + # Extra annotations to attach to the client pods + # This should be a multi-line string mapping directly to a map of + # the annotations to apply to the client pods + annotations: null + + # extraEnvironmentVars is a map of extra environment variables to set with the pod. These could be + # used to include proxy settings required for cloud auto-join feature, + # in case kubernetes cluster is behind egress http proxies. Additionally, it could be used to configure + # custom consul parameters.
+ extraEnvironmentVars: {} + # http_proxy: http://localhost:3128, + # https_proxy: http://localhost:3128, + # no_proxy: internal.domain.com + + # dnsPolicy to use. + dnsPolicy: null + + # updateStrategy for the DaemonSet. + # See https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy. + # This should be a multi-line string mapping directly to the updateStrategy + # Example: + # updateStrategy: | + # rollingUpdate: + # maxUnavailable: 5 + # type: RollingUpdate + updateStrategy: null + + # snapshotAgent contains settings for setting up and running snapshot agents + # within the Consul clusters. They are required to be co-located with Consul + # clients, so will inherit the clients' nodeSelector, tolerations and affinity. + # This is an Enterprise feature only. + snapshotAgent: + enabled: false + + # replicas determines how many snapshot agent pods are created + replicas: 2 + + # configSecret references a Kubernetes secret that should be manually created to + # contain the entire config to be used on the snapshot agent. This is the preferred + # method of configuration since there are usually storage credentials present. + # Snapshot agent config details: + # https://www.consul.io/docs/commands/snapshot/agent.html#config-file-options- + # To create a secret: + # https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-using-kubectl-create-secret + configSecret: + secretName: null + secretKey: null + +# Configuration for DNS configuration within the Kubernetes cluster. +# This creates a service that routes to all agents (client or server) +# for serving DNS requests. 
This DOES NOT automatically configure kube-dns +# today, so you must still manually configure a `stubDomain` with kube-dns +# for this to have any effect: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#configure-stub-domain-and-upstream-dns-servers +dns: + enabled: "-" + + # Set a predefined cluster IP for the DNS service. + # Useful if you need to reference the DNS service's IP + # address in CoreDNS config. + clusterIP: null + + # Extra annotations to attach to the dns service + # This should be a multi-line string of + # annotations to apply to the dns Service + annotations: null + +ui: + # True if you want to enable the Consul UI. The UI will run only + # on the server nodes. This makes UI access via the service below (if + # enabled) predictable rather than "any node" if you're running Consul + # clients as well. + enabled: "-" + + # True if you want to create a Service entry for the Consul UI. + # + # serviceType can be used to control the type of service created. For + # example, setting this to "LoadBalancer" will create an external load + # balancer (for supported K8S installations) to access the UI. + service: + enabled: true + type: null + # This should be a multi-line string mapping directly to the a map of + # the annotations to apply to the UI service + annotations: null + # Additional ServiceSpec values + # This should be a multi-line string mapping directly to a Kubernetes + # ServiceSpec object. + additionalSpec: null + +# syncCatalog will run the catalog sync process to sync K8S with Consul +# services. This can run bidirectional (default) or unidirectionally (Consul +# to K8S or K8S to Consul only). +# +# This process assumes that a Consul agent is available on the host IP. +# This is done automatically if clients are enabled. If clients are not +# enabled then set the node selection so that it chooses a node with a +# Consul agent. +syncCatalog: + # True if you want to enable the catalog sync. 
Set to "-" to inherit from + # global.enabled. + enabled: false + image: null + default: true # true will sync by default, otherwise requires annotation + + # toConsul and toK8S control whether syncing is enabled to Consul or K8S + # as a destination. If both of these are disabled, the sync will do nothing. + toConsul: true + toK8S: true + + # k8sPrefix is the service prefix to prepend to services before registering + # with Kubernetes. For example "consul-" will register all services + # prepended with "consul-". (Consul -> Kubernetes sync) + k8sPrefix: null + + # k8sAllowNamespaces is a list of k8s namespaces to sync the k8s services from. + # If a k8s namespace is not included in this list or is listed in `k8sDenyNamespaces`, + # services in that k8s namespace will not be synced even if they are explicitly + # annotated. Use ["*"] to automatically allow all k8s namespaces. + # + # For example, ["namespace1", "namespace2"] will only allow services in the k8s + # namespaces `namespace1` and `namespace2` to be synced and registered + # with Consul. All other k8s namespaces will be ignored. + # + # To deny all namespaces, set this to []. + # + # Note: `k8sDenyNamespaces` takes precedence over values defined here. + # Requires consul-k8s v0.12+ + k8sAllowNamespaces: ["*"] + + # k8sDenyNamespaces is a list of k8s namespaces that should not have their + # services synced. This list takes precedence over `k8sAllowNamespaces`. + # `*` is not supported because then nothing would be allowed to sync. + # Requires consul-k8s v0.12+. + # + # For example, if `k8sAllowNamespaces` is `["*"]` and `k8sDenyNamespaces` is + # `["namespace1", "namespace2"]`, then all k8s namespaces besides "namespace1" + # and "namespace2" will be synced. + k8sDenyNamespaces: ["kube-system", "kube-public"] + + # [DEPRECATED] Use k8sAllowNamespaces and k8sDenyNamespaces instead. For + # backwards compatibility, if both this and the allow/deny lists are set, + # the allow/deny lists will be ignored. 
+ # k8sSourceNamespace is the Kubernetes namespace to watch for service + # changes and sync to Consul. If this is not set then it will default + # to all namespaces. + k8sSourceNamespace: null + + # [Enterprise Only] These settings manage the catalog sync's interaction with + # Consul namespaces (requires consul-ent v1.7+ and consul-k8s v0.12+). + # Also, `global.enableConsulNamespaces` must be true. + consulNamespaces: + # consulDestinationNamespace is the name of the Consul namespace to register all + # k8s services into. If the Consul namespace does not already exist, + # it will be created. This will be ignored if `mirroringK8S` is true. + consulDestinationNamespace: "default" + + # mirroringK8S causes k8s services to be registered into a Consul namespace + # of the same name as their k8s namespace, optionally prefixed if + # `mirroringK8SPrefix` is set below. If the Consul namespace does not + # already exist, it will be created. Turning this on overrides the + # `consulDestinationNamespace` setting. + # `addK8SNamespaceSuffix` may no longer be needed if enabling this option. + mirroringK8S: false + + # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace + # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a + # service in the k8s `staging` namespace will be registered into the + # `k8s-staging` Consul namespace. + mirroringK8SPrefix: "" + + # addK8SNamespaceSuffix appends Kubernetes namespace suffix to + # each service name synced to Consul, separated by a dash. + # For example, for a service 'foo' in the default namespace, + # the sync process will create a Consul service named 'foo-default'. + # Set this flag to true to avoid registering services with the same name + # but in different namespaces as instances for the same Consul service. + # Namespace suffix is not added if 'annotationServiceName' is provided. 
+ addK8SNamespaceSuffix: true + + # consulPrefix is the service prefix which prepends itself + # to Kubernetes services registered within Consul + # For example, "k8s-" will register all services prepended with "k8s-". + # (Kubernetes -> Consul sync) + # consulPrefix is ignored when 'annotationServiceName' is provided. + # NOTE: Updating this property to a non-null value for an existing installation will result in deregistering + # of existing services in Consul and registering them with a new name. + consulPrefix: null + + # k8sTag is an optional tag that is applied to all of the Kubernetes services + # that are synced into Consul. If nothing is set, defaults to "k8s". + # (Kubernetes -> Consul sync) + k8sTag: null + + # syncClusterIPServices syncs services of the ClusterIP type, which may + # or may not be broadly accessible depending on your Kubernetes cluster. + # Set this to false to skip syncing ClusterIP services. + syncClusterIPServices: true + + # nodePortSyncType configures the type of syncing that happens for NodePort + # services. The valid options are: ExternalOnly, InternalOnly, ExternalFirst. + # - ExternalOnly will only use a node's ExternalIP address for the sync + # - InternalOnly use's the node's InternalIP address + # - ExternalFirst will preferentially use the node's ExternalIP address, but + # if it doesn't exist, it will use the node's InternalIP address instead. + nodePortSyncType: ExternalFirst + + # aclSyncToken refers to a Kubernetes secret that you have created that contains + # an ACL token for your Consul cluster which allows the sync process the correct + # permissions. This is only needed if ACLs are enabled on the Consul cluster. + aclSyncToken: + secretName: null + secretKey: null + + # nodeSelector labels for syncCatalog pod assignment, formatted as a multi-line string. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # Log verbosity level. One of "trace", "debug", "info", "warn", or "error". + logLevel: info + + # Override the default interval to perform syncing operations creating Consul services. + consulWriteInterval: null + +# ConnectInject will enable the automatic Connect sidecar injector. +connectInject: + # True if you want to enable connect injection. Set to "-" to inherit from + # global.enabled. + # Requires consul-k8s >= 0.10.1. + enabled: false + image: null # image for consul-k8s that contains the injector + default: false # true will inject by default, otherwise requires annotation + + # The Docker image for Consul to use when performing Connect injection. + # Defaults to global.image. + imageConsul: null + + # The Docker image for envoy to use as the proxy sidecar when performing + # Connect injection. If using Consul 1.7+, the envoy version must be 1.13+. + # If not set, the image used depends on the consul-k8s version. For + # consul-k8s 0.12.0 the default is envoyproxy/envoy-alpine:v1.13.0. + imageEnvoy: null + + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. This should be set to a multiline string. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: | + # matchLabels: + # namespace-label: label-value + namespaceSelector: null + + # k8sAllowNamespaces is a list of k8s namespaces to allow Connect sidecar + # injection in. If a k8s namespace is not included or is listed in `k8sDenyNamespaces`, + # pods in that k8s namespace will not be injected even if they are explicitly + # annotated. Use ["*"] to automatically allow all k8s namespaces. 
+ # + # For example, ["namespace1", "namespace2"] will only allow pods in the k8s + # namespaces `namespace1` and `namespace2` to have Connect sidecars injected + # and registered with Consul. All other k8s namespaces will be ignored. + # + # To deny all namespaces, set this to []. + # + # Note: `k8sDenyNamespaces` takes precedence over values defined here and + # `namespaceSelector` takes precedence over both since it is applied first. + # `kube-system` and `kube-public` are never injected, even if included here. + # Requires consul-k8s v0.12+ + k8sAllowNamespaces: ["*"] + + # k8sDenyNamespaces is a list of k8s namespaces that should not allow Connect + # sidecar injection. This list takes precedence over `k8sAllowNamespaces`. + # `*` is not supported because then nothing would be allowed to be injected. + # + # For example, if `k8sAllowNamespaces` is `["*"]` and k8sDenyNamespaces is + # `["namespace1", "namespace2"]`, then all k8s namespaces besides "namespace1" + # and "namespace2" will be available for injection. + # + # Note: `namespaceSelector` takes precedence over this since it is applied first. + # `kube-system` and `kube-public` are never injected. + # Requires consul-k8s v0.12+. + k8sDenyNamespaces: [] + + # [Enterprise Only] These settings manage the connect injector's interaction with + # Consul namespaces (requires consul-ent v1.7+ and consul-k8s v0.12+). + # Also, `global.enableConsulNamespaces` must be true. + consulNamespaces: + # consulDestinationNamespace is the name of the Consul namespace to register all + # k8s pods into. If the Consul namespace does not already exist, + # it will be created. This will be ignored if `mirroringK8S` is true. + consulDestinationNamespace: "default" + + # mirroringK8S causes k8s pods to be registered into a Consul namespace + # of the same name as their k8s namespace, optionally prefixed if + # `mirroringK8SPrefix` is set below. If the Consul namespace does not + # already exist, it will be created. 
Turning this on overrides the + # `consulDestinationNamespace` setting. + mirroringK8S: false + + # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace + # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a + # pod in the k8s `staging` namespace will be registered into the + # `k8s-staging` Consul namespace. + mirroringK8SPrefix: "" + + # The certs section configures how the webhook TLS certs are configured. + # These are the TLS certs for the Kube apiserver communicating to the + # webhook. By default, the injector will generate and manage its own certs, + # but this requires the ability for the injector to update its own + # MutatingWebhookConfiguration. In a production environment, custom certs + # should probably be used. Configure the values below to enable this. + certs: + # secretName is the name of the secret that has the TLS certificate and + # private key to serve the injector webhook. If this is null, then the + # injector will default to its automatic management mode that will assign + # a service account to the injector to generate its own certificates. + secretName: null + + # caBundle is a base64-encoded PEM-encoded certificate bundle for the + # CA that signed the TLS certificate that the webhook serves. This must + # be set if secretName is non-null. + caBundle: "" + + # certName and keyName are the names of the files within the secret for + # the TLS cert and private key, respectively. These have reasonable + # defaults but can be customized if necessary. + certName: tls.crt + keyName: tls.key + + # nodeSelector labels for connectInject pod assignment, formatted as a multi-line string. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: null + + # aclBindingRuleSelector accepts a query that defines which Service Accounts + # can authenticate to Consul and receive an ACL token during Connect injection. + # The default setting, i.e. serviceaccount.name!=default, prevents the + # 'default' Service Account from logging in. + # If set to an empty string all service accounts can log in. + # This only has effect if ACLs are enabled. + # + # See https://www.consul.io/docs/acl/acl-auth-methods.html#binding-rules + # and https://www.consul.io/docs/acl/auth-methods/kubernetes.html#trusted-identity-attributes + # for more details. + # Requires Consul >= v1.5 and consul-k8s >= v0.8.0. + aclBindingRuleSelector: "serviceaccount.name!=default" + + # If not using global.bootstrapACLs and instead manually setting up an auth + # method for Connect inject, set this to the name of your auth method. + overrideAuthMethodName: "" + + # aclInjectToken refers to a Kubernetes secret that you have created that contains + # an ACL token for your Consul cluster which allows the Connect injector the correct + # permissions. This is only needed if Consul namespaces [Enterprise only] and ACLs + # are enabled on the Consul cluster and you are not setting `global.bootstrapACLs` + # to `true`. This token needs to have `operator = "write"` privileges to be able to + # create Consul namespaces. + aclInjectToken: + secretName: null + secretKey: null + + # Requires Consul >= v1.5 and consul-k8s >= v0.8.1. + centralConfig: + # enabled controls whether central config is enabled on all servers and clients. + # See https://www.consul.io/docs/agent/options.html#enable_central_service_config. + # If changing this after installation, servers and clients must be restarted + # for the change to take effect. 
+ enabled: true + + # defaultProtocol allows you to specify a convenience default protocol if + # most of your services are of the same protocol type. The individual annotation + # on any given pod will override this value. + # Valid values are "http", "http2", "grpc" and "tcp". + defaultProtocol: null + + # proxyDefaults is a raw json string that will be written as the value of + # the "config" key of the global proxy-defaults config entry. + # See: https://www.consul.io/docs/agent/config-entries/proxy-defaults.html + # NOTE: Changes to this value after the chart is first installed have *no* + # effect. In order to change the proxy-defaults config after installation, + # you must use the Consul API. + proxyDefaults: | + {} + +# Mesh Gateways enable Consul Connect to work across Consul datacenters. +meshGateway: + # If mesh gateways are enabled, a Deployment will be created that runs + # gateways and Consul Connect will be configured to use gateways. + # See https://www.consul.io/docs/connect/mesh_gateway.html + # Requirements: consul >= 1.6.0 and consul-k8s >= 0.9.0 if using global.bootstrapACLs. + enabled: false + + # Globally configure which mode the gateway should run in. + # Can be set to either "remote", "local", "none" or empty string or null. + # See https://consul.io/docs/connect/mesh_gateway.html#modes-of-operation for + # a description of each mode. + # If set to anything other than "" or null, connectInject.centralConfig.enabled + # should be set to true so that the global config will actually be used. + # If set to the empty string, no global default will be set and the gateway mode + # will need to be set individually for each service. + globalMode: local + + # Number of replicas for the Deployment. + replicas: 2 + + # What gets registered as wan address for the gateway. + wanAddress: + # Port that gets registered. + port: 443 + + # If true, each Gateway Pod will advertise its NodeIP + # (as provided by the Kubernetes downward API) as the wan address. 
+ # This is useful if the node IPs are routable from other DCs. + # useNodeName and host must be false and "" respectively. + useNodeIP: true + + # If true, each Gateway Pod will advertise its NodeName + # (as provided by the Kubernetes downward API) as the wan address. + # This is useful if the node names are DNS entries that are + # routable from other DCs. + # meshGateway.wanAddress.port will be used as the port for the wan address. + # useNodeIP and host must be false and "" respectively. + useNodeName: false + + # If set, each gateway Pod will use this host as its wan address. + # Users must ensure that this address routes to the Gateway pods, + # for example via a DNS entry that routes to the Service fronting the Deployment. + # meshGateway.wanAddress.port will be used as the port for the wan address. + # useNodeIP and useNodeName must be false. + host: "" + + # The service option configures the Service that fronts the Gateway Deployment. + service: + # Whether to create a Service or not. + enabled: false + + # Type of service, ex. LoadBalancer, ClusterIP. + type: ClusterIP + + # Port that the service will be exposed on. + # The targetPort will be set to meshGateway.containerPort. + port: 443 + + # Optional nodePort of the service. Can be used in conjunction with + # type: NodePort. + nodePort: null + + # Optional YAML string for additional annotations. + annotations: null + + # Optional YAML string that will be appended to the Service spec. + additionalSpec: null + + # Envoy image to use. For Consul v1.7+, Envoy version 1.13+ is required. + imageEnvoy: envoyproxy/envoy:v1.13.0 + + # If set to true, gateway Pods will run on the host network. + hostNetwork: false + + # dnsPolicy to use. + dnsPolicy: null + + # Override the default 'mesh-gateway' service name registered in Consul. + # Cannot be used if bootstrapACLs is true since the ACL token generated + # is only for the name 'mesh-gateway'. 
+ consulServiceName: "" + + # Port that the gateway will run on inside the container. + containerPort: 443 + + # Optional hostPort for the gateway to be exposed on. + # This can be used with wanAddress.port and wanAddress.useNodeIP + # to expose the gateways directly from the node. + # If hostNetwork is true, this must be null or set to the same port as + # containerPort. + # NOTE: Cannot set to 8500 or 8502 because those are reserved for the Consul + # agent. + hostPort: null + + # If there are no connect-enabled services running, then the gateway + # will fail health checks. You may disable health checks as a temporary + # workaround. + enableHealthChecks: true + + resources: | + requests: + memory: "128Mi" + cpu: "250m" + limits: + memory: "256Mi" + cpu: "500m" + + # By default, we set an anti affinity so that two gateway pods won't be + # on the same node. NOTE: Gateways require that Consul client agents are + # also running on the nodes alongside each gateway Pod. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: mesh-gateway + topologyKey: kubernetes.io/hostname + + # Optional YAML string to specify tolerations. + tolerations: null + + # Optional YAML string to specify a nodeSelector config. + nodeSelector: null + + # Optional priorityClassName. + priorityClassName: "" + + # Optional YAML string for additional annotations. + annotations: null + +# Control whether a test Pod manifest is generated when running helm template. +# When using helm install, the test Pod is not submitted to the cluster so this +# is only useful when running helm template. +tests: + enabled: true