helm consul toegevoegd, maar niet als submodule?

This commit is contained in:
2020-03-09 12:27:10 +01:00
parent 31368e50b2
commit 3432492589
157 changed files with 14508 additions and 0 deletions

View File

@@ -0,0 +1,71 @@
# Path to the Kubernetes manifest (bundled with this module) that creates
# the `helm` service account used by the bootstrap step below.
locals {
  service_account_path = "${path.module}/service-account.yaml"
}
# Google Cloud provider; the project comes from a variable rather than the
# GOOGLE_PROJECT env var (see the `project` variable's description).
# Credentials/region are taken from the ambient gcloud configuration.
provider "google" {
  project = var.project
}
# Random 4-byte suffix appended to the cluster name so repeated applies in
# the same project don't collide on the name.
resource "random_id" "suffix" {
  byte_length = 4
}
# Look up the GKE versions currently available in the target zone; the
# cluster below pins master and nodes to the latest of each.
data "google_container_engine_versions" "main" {
  location = var.zone
}
# Zonal GKE cluster used for consul-helm testing.
resource "google_container_cluster" "cluster" {
  name    = "consul-k8s-${random_id.suffix.dec}"
  project = var.project

  # NOTE(review): legacy ABAC grants broad cluster permissions; presumably
  # enabled so the Helm v2 bootstrap below works without RBAC wiring —
  # confirm before reusing this module outside throwaway test clusters.
  enable_legacy_abac = true

  initial_node_count = 3
  location           = var.zone

  # Track the newest versions offered in this zone at plan time.
  min_master_version = data.google_container_engine_versions.main.latest_master_version
  node_version       = data.google_container_engine_versions.main.latest_node_version
}
# Configures local kubectl credentials for the cluster when var.init_cli
# is set. Shells out to gcloud since there is no first-class Terraform
# resource for kubeconfig entries.
resource "null_resource" "kubectl" {
  count = var.init_cli ? 1 : 0

  # Destroy-time provisioners may only reference `self` (quoted `when`/
  # `on_failure` strings and cross-resource references are deprecated in
  # 0.12 and an error in 0.13+), so stash the cluster name in triggers.
  triggers = {
    cluster      = google_container_cluster.cluster.id
    cluster_name = google_container_cluster.cluster.name
  }

  # On creation, we want to setup the kubectl credentials. The easiest way
  # to do this is to shell out to gcloud.
  provisioner "local-exec" {
    command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
  }

  # On destroy we want to try to clean up the kubectl credentials. This
  # might fail if the credentials are already cleaned up or something so we
  # want this to continue on failure. Generally, this works just fine since
  # it only operates on local data.
  provisioner "local-exec" {
    when       = destroy
    on_failure = continue
    command    = "kubectl config get-clusters | grep ${self.triggers.cluster_name} | xargs -n1 kubectl config delete-cluster"
  }

  provisioner "local-exec" {
    when       = destroy
    on_failure = continue
    command    = "kubectl config get-contexts | grep ${self.triggers.cluster_name} | xargs -n1 kubectl config delete-context"
  }
}
# Bootstraps Helm in the cluster when var.init_cli is set: applies the
# bundled `helm` service-account manifest, then runs `helm init`.
# NOTE(review): `helm init` exists only in Helm v2 (end-of-life); Helm v3
# needs no init step — confirm which Helm version the callers use.
resource "null_resource" "helm" {
  count = var.init_cli ? 1 : 0

  # Resource reference instead of a quoted string: quoted depends_on
  # entries are deprecated in 0.12 and rejected by 0.13+.
  depends_on = [null_resource.kubectl]

  triggers = {
    cluster = google_container_cluster.cluster.id
  }

  # --wait blocks until Tiller is ready so dependents can use Helm
  # immediately after apply.
  provisioner "local-exec" {
    command = <<EOF
kubectl apply -f '${local.service_account_path}'
helm init --service-account helm --wait
EOF
  }
}

View File

@@ -0,0 +1,7 @@
# Fully-qualified resource ID of the GKE cluster.
output "cluster_id" {
  value = google_container_cluster.cluster.id
}

# Short cluster name (includes the random suffix), e.g. for
# `gcloud container clusters get-credentials`.
output "cluster_name" {
  value = google_container_cluster.cluster.name
}

View File

@@ -0,0 +1,18 @@
# Service account Tiller (Helm v2) runs as; referenced by
# `helm init --service-account helm` in the Terraform bootstrap step.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm
  namespace: kube-system
---
# Grant the account the built-in cluster-admin role cluster-wide so Helm
# can install charts into any namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: helm
    namespace: kube-system

View File

@@ -0,0 +1,17 @@
# Explicit type constraints added so bad inputs fail at plan time rather
# than deep inside a resource (backward compatible: same names/defaults).

# Required: no default, must be supplied by the caller.
variable "project" {
  type        = string
  description = <<EOF
Google Cloud Project to launch resources in. This project must have GKE
enabled and billing activated. We can't use the GOOGLE_PROJECT environment
variable since we need to access the project for other uses.
EOF
}

# Zone (not region): the cluster created from this is zonal.
variable "zone" {
  type        = string
  default     = "us-central1-a"
  description = "The zone to launch all the GKE nodes in."
}

# Gates the null_resource provisioners that set up kubectl/helm locally.
variable "init_cli" {
  type        = bool
  default     = false
  description = "Whether to init the CLI tools kubectl, helm, etc. or not."
}