forked from hashicorp/vault-helm
main.tf
provider "google" {
  project = "${var.project}"
}

resource "random_id" "suffix" {
  byte_length = 4
}

data "google_container_engine_versions" "main" {
  location       = "${var.zone}"
  version_prefix = "1.19."
}

data "google_service_account" "gcpapi" {
  account_id = "${var.gcp_service_account}"
}

resource "google_container_cluster" "cluster" {
  name               = "vault-helm-dev-${random_id.suffix.dec}"
  project            = "${var.project}"
  enable_legacy_abac = true
  initial_node_count = 3
  location           = "${var.zone}"
  min_master_version = "${data.google_container_engine_versions.main.latest_master_version}"
  node_version       = "${data.google_container_engine_versions.main.latest_node_version}"

  node_config {
    # Service account for the nodes to use.
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
      "https://www.googleapis.com/auth/compute",
      "https://www.googleapis.com/auth/devstorage.read_write",
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/service.management.readonly",
      "https://www.googleapis.com/auth/servicecontrol",
      "https://www.googleapis.com/auth/trace.append",
    ]

    service_account = "${data.google_service_account.gcpapi.email}"
  }
}

resource "null_resource" "kubectl" {
  count = "${var.init_cli ? 1 : 0}"

  triggers = {
    cluster = "${google_container_cluster.cluster.id}"
  }

  # On creation, we want to set up the kubectl credentials. The easiest way
  # to do this is to shell out to gcloud.
  provisioner "local-exec" {
    command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
  }

  # On destroy we want to try to clean up the kubectl credentials. This
  # might fail if the credentials are already cleaned up or something, so we
  # want this to continue on failure. Generally, this works just fine since
  # it only operates on local data.
  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster"
  }

  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-contexts | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-context"
  }
}
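
main.tf references four input variables (project, zone, gcp_service_account, init_cli) whose declarations are not shown here; they would normally live in a companion variables.tf in the same directory. A minimal sketch of what those declarations might look like is below; the descriptions and defaults are illustrative assumptions, not values taken from the repository.

# variables.tf (sketch, assumed declarations)

variable "project" {
  description = "GCP project in which to create the GKE cluster."
}

variable "zone" {
  description = "Zone (or region) for the cluster, e.g. us-central1-a (assumed default)."
  default     = "us-central1-a"
}

variable "gcp_service_account" {
  description = "Account ID of an existing service account the cluster nodes run as."
}

variable "init_cli" {
  description = "Whether to run the local-exec provisioners that configure kubectl (assumed default)."
  default     = true
}

With those variables defined, terraform init followed by terraform apply would provision the GKE cluster and, when init_cli is true, run the local-exec provisioners above to point kubectl at it.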