cluster.tf (forked from terraform-aws-modules/terraform-aws-eks)
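# CloudWatch log group for EKS control-plane logs; only created when at least
# one log type is enabled and the module is set to create the cluster.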
resource "aws_cloudwatch_log_group" "this" {
count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cluster_log_retention_in_days
kms_key_id = var.cluster_log_kms_key_id
tags = var.tags
}
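# The EKS control plane itself: networking (vpc_config), control-plane logging,
# optional encryption of Kubernetes resources via a KMS key, and create/delete
# timeouts. depends_on ensures the security group rules, IAM policy attachments,
# and log group exist before the cluster is created.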
resource "aws_eks_cluster" "this" {
count = var.create_eks ? 1 : 0
name = var.cluster_name
enabled_cluster_log_types = var.cluster_enabled_log_types
role_arn = local.cluster_iam_role_arn
version = var.cluster_version
tags = var.tags
vpc_config {
security_group_ids = compact([local.cluster_security_group_id])
subnet_ids = var.subnets
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
public_access_cidrs = var.cluster_endpoint_public_access_cidrs
}
timeouts {
create = var.cluster_create_timeout
delete = var.cluster_delete_timeout
}
dynamic encryption_config {
for_each = toset(var.cluster_encryption_config)
content {
provider {
key_arn = encryption_config.value["provider_key_arn"]
}
resources = encryption_config.value["resources"]
}
}
depends_on = [
aws_security_group_rule.cluster_egress_internet,
aws_security_group_rule.cluster_https_worker_ingress,
aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
aws_cloudwatch_log_group.this
]
}
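# Illustrative shape of var.cluster_encryption_config as consumed by the dynamic
# block above (the KMS key ARN is a placeholder, not a value from this module):
#
#   cluster_encryption_config = [
#     {
#       provider_key_arn = "arn:aws:kms:us-east-1:111122223333:key/example"
#       resources        = ["secrets"]
#     }
#   ]

# When only the private API endpoint is enabled, allow HTTPS access to the
# EKS-managed cluster security group from the given CIDR blocks.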
resource "aws_security_group_rule" "cluster_private_access" {
count = var.create_eks && var.cluster_endpoint_private_access && var.cluster_endpoint_public_access == false ? 1 : 0
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = var.cluster_endpoint_private_access_cidrs
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
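# Poll the cluster endpoint with a local command until the API server responds,
# so that aws-auth management only starts against a reachable cluster.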
resource "null_resource" "wait_for_cluster" {
count = var.create_eks && var.manage_aws_auth ? 1 : 0
depends_on = [
aws_eks_cluster.this[0],
aws_security_group_rule.cluster_private_access,
]
provisioner "local-exec" {
command = var.wait_for_cluster_cmd
interpreter = var.wait_for_cluster_interpreter
environment = {
ENDPOINT = aws_eks_cluster.this[0].endpoint
}
}
}
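# Module-managed security group for the cluster control plane; only created
# when cluster_create_security_group is true.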
resource "aws_security_group" "cluster" {
count = var.cluster_create_security_group && var.create_eks ? 1 : 0
name_prefix = var.cluster_name
description = "EKS cluster security group."
vpc_id = var.vpc_id
tags = merge(
var.tags,
{
"Name" = "${var.cluster_name}-eks_cluster_sg"
},
)
}
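# Default rules for the module-managed cluster security group: unrestricted
# egress, plus HTTPS ingress from the worker security group.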
resource "aws_security_group_rule" "cluster_egress_internet" {
count = var.cluster_create_security_group && var.create_eks ? 1 : 0
description = "Allow cluster egress access to the Internet."
protocol = "-1"
security_group_id = local.cluster_security_group_id
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
count = var.cluster_create_security_group && var.create_eks ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API."
protocol = "tcp"
security_group_id = local.cluster_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 443
to_port = 443
type = "ingress"
}
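# IAM role assumed by the EKS control plane; only managed by this module when
# manage_cluster_iam_resources is true.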
resource "aws_iam_role" "cluster" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
name_prefix = var.cluster_name
assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
permissions_boundary = var.permissions_boundary
path = var.iam_path
force_detach_policies = true
tags = var.tags
}
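# Attach the AWS-managed policies the EKS control plane requires.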
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
role = local.cluster_iam_role_name
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
role = local.cluster_iam_role_name
}
/*
  Attach a policy to the cluster IAM role granting the permissions EKS needs
  to create the AWSServiceRoleForElasticLoadBalancing service-linked role
  during ELB provisioning.
*/
data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
statement {
effect = "Allow"
actions = [
"ec2:DescribeAccountAttributes",
"ec2:DescribeInternetGateways"
]
resources = ["*"]
}
}
resource "aws_iam_role_policy" "cluster_elb_sl_role_creation" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
name_prefix = "${var.cluster_name}-elb-sl-role-creation"
role = local.cluster_iam_role_name
policy = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json
}