From 46d7e5b00b12c21d34e52b970e9488dd93eadb4c Mon Sep 17 00:00:00 2001
From: Vladyslav Kurmaz
Date: Wed, 21 Aug 2024 23:51:42 +0300
Subject: [PATCH] Feat/v24.8.1 (#57)

* feat(aws): RDS allocated storage, SES/Cognito from email

* feat(aws): third-level DNS name for environments

* Update README.md
---
 .env.template                 |   3 +
 .tln.conf                     |  13 ++-
 README.md                     | 175 ++++++++++++++--------------------
 aws/app/dns.tf                |  45 +++++++++
 aws/app/main.tf               |  17 ----
 aws/app/nginx.tf              |   2 +-
 aws/app/outputs.tf            |   3 +
 aws/app/postgres.tf           |   4 +-
 aws/app/variables.tf          |   9 ++
 aws/group/.terraform.lock.hcl |  61 ------------
 aws/group/cognito.tf          |   5 +-
 aws/group/jump_server.tf      |   2 +
 aws/group/ses.tf              |   2 +
 whoami.yaml                   |   2 +-
 14 files changed, 151 insertions(+), 192 deletions(-)
 create mode 100644 aws/app/dns.tf

diff --git a/.env.template b/.env.template
index 9bbadff..ea7e8a1 100644
--- a/.env.template
+++ b/.env.template
@@ -20,6 +20,9 @@ TF_VAR_tenant_id=
 TF_VAR_domain_name=myproject.dev
 TF_VAR_dns_records=myproject.dev,api,admin
+TF_VAR_use_primary_domain=false
 
 TF_VAR_rds_pg_db_size=db.t3.micro
+TF_VAR_rds_pg_db_allocated_storage=20
+TF_VAR_rds_pg_max_allocated_storage=30
 TF_VAR_databases={ "user" = { owner = "admin", password = "admin" }, "auth" = { owner = "admin", password = "admin" } }
diff --git a/.tln.conf b/.tln.conf
index 2f01299..3056814 100644
--- a/.tln.conf
+++ b/.tln.conf
@@ -158,6 +158,13 @@ module.exports = {
     if (env.TLN_CLOUDS_TENANT) {
       env.TF_VAR_tenant_id = env.TLN_CLOUDS_TENANT;
     }
+    // compose the whoami test DNS name: api[.<env_id>].<domain_name>
+    const arr = ['api'];
+    if (env.TF_VAR_use_primary_domain === 'false') {
+      arr.push(env.TF_VAR_env_id);
+    }
+    arr.push(env.TF_VAR_domain_name);
+    env.TLN_CLOUDS_WHOAMI_HOST = arr.join('.');
   },
   dotenvs: async (tln) => { if (fs.existsSync('.env')) return ['.env']; else return [] },
   inherits: async (tln) => [],
@@ -196,7 +203,7 @@ sshuttle --dns${daemon} -vr ${script.env.TLN_CLOUDS_BASTION} 0/0 --ssh-cmd 'ssh$
     { id: 'whoami', builder: async (tln, script) => {
       const op = script.env.TLN_CLOUDS_DELETE ? 'delete' : 'apply';
       script.set([
-        `kubectl ${op} -f ${path.join(__dirname, 'whoami.yaml')}`
+        `envsubst < ${path.join(__dirname, 'whoami.yaml')} | kubectl ${op} -f -`
       ]);
     } },
@@ -212,7 +219,7 @@ sshuttle --dns${daemon} -vr ${script.env.TLN_CLOUDS_BASTION} 0/0 --ssh-cmd 'ssh$
 tln construct -- --backend cloud${opts} --layers provider --state project,provider
 tln construct -- --backend cloud${opts} --layers group --state project,provider,group
 tln construct -- --backend cloud${opts} --layers network,managed --state project,provider,group,env,layer
-${script.env.TLN_CLOUDS_CI ? 'tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}
+${script.env.TLN_CLOUDS_CI ? '#tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}
 tln construct -- --backend cloud${opts} --layers app --state project,provider,group,env,layer
 `].concat(
       (script.env.TF_VAR_tenant_id) ? [
         `tln construct -- --backend cloud${opts} --layers tenant --state project,provider,group,env,tenant --tenant ${script.env.TF_VAR_tenant_id}`,
       ] : []
     ));
    } },
@@ -224,7 +231,7 @@
    { id: 'down', builder: async (tln, script) => {
      const opts = getTerraformOpts(script.env);
      script.set([
-      `${script.env.TLN_CLOUDS_CI ? 'tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}`,
+      `${script.env.TLN_CLOUDS_CI ? '#tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}`,
     ].concat((
       (script.env.TF_VAR_tenant_id) ?
        [
          `tln deconstruct -- --backend cloud${opts} --layers tenant --state project,provider,group,env,tenant --tenant ${script.env.TF_VAR_tenant_id}`,
diff --git a/README.md b/README.md
index 6715d63..8187f3d 100644
--- a/README.md
+++ b/README.md
@@ -3,11 +3,11 @@
 ![Infrastructure Instance](ii.png)
 
 ## Features
-* supports AWS, DO (Azure, GCP - in progress)
-* provides Multi-tenancy feature via layers architecture (provider, group, network, managed, app, tenant)
-* implements easy-to-construct multiple environment approach, controls by single environment variable - **TF_VAR_env_id**
+* Supports AWS (Azure, GCP - in progress)
+* Provides multi-tenancy via a layered architecture (provider, group, network, managed, app, tenant)
+* Implements an easy-to-construct multi-environment approach, controlled by a single environment variable - **TF_VAR_env_id**
 * IaC via Terraform and Helm
-* supports multiple backend providers - Local, Cloud, PG (S3 - in progress)
+* Utilises multiple backend providers - Local, Cloud, PG (S3 - in progress)
 
 ## Infrastructure Instance layers
 ![Infrastructure Instance Layers](layers.png)
@@ -16,7 +16,7 @@
 * Install [tln](https://www.npmjs.com/package/tln-cli)
 * Goto **projects** folder from tln-cli installation above and clone repository
   ```
-  git clone --depth 1 --branch v24.4.0 git@github.com:project-talan/tln-clouds.git && cd tln-clouds
+  git clone --depth 1 --branch v24.8.1 git@github.com:project-talan/tln-clouds.git && cd tln-clouds
   ```
   > Important
   > * Commands below assume that Terraform Cloud is used as a storage for states
@@ -36,9 +36,12 @@
   TF_VAR_repositories=io.myproject.services.api,io.myproject.web.landing
 
   TF_VAR_domain_name=myproject.io
-  TF_VAR_dns_records=myproject.io,api
+  TF_VAR_dns_records=dev01.myproject.io,api
+  TF_VAR_use_primary_domain=false
 
   TF_VAR_rds_pg_db_size=db.t3.micro
+  TF_VAR_rds_pg_db_allocated_storage=20
+  TF_VAR_rds_pg_max_allocated_storage=30
   TF_VAR_databases={ "user" = { owner = "admin", password = "admin" }, "auth" = { owner = "admin", password = "admin" } }
   ```
@@ -49,134 +52,94 @@
   AWS_SECRET_ACCESS_KEY=
   AWS_DEFAULT_REGION=eu-central-1
 
-  TF_VAR_aws_k8s_version=1.29
+  TF_VAR_aws_k8s_version=1.30
   TF_VAR_aws_k8s_nodes_min=1
   TF_VAR_aws_k8s_nodes_desired=2
   TF_VAR_aws_k8s_nodes_max=3
   TF_VAR_aws_k8s_nodes_size=t3a.medium
   TF_VAR_aws_k8s_nodes_disk=50
   ```
-* Install dependencies
+* **Install dependencies**
   ```
   tln install aws --depends
   ```
-* Construct four AWS Dev infrastructure instance layers
+* Construct four AWS Infrastructure Instance layers
 
-  (1) Provider layer - ERC
-  ```
-  tln construct aws -- --backend cloud --init --apply --layers provider --state project,provider
-  ```
-  (2) Groupr layer - domain name, certificate & validation. You will need to modify DNS nameservers at your registrar side
-  ```
-  tln construct aws -- --backend cloud --init --apply --layers group --state project,provider,group
-  ```
-  (3,4) Network and Managed layers - VPC, Bastion, K8s
-  ```
-  tln construct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
-  ```
+  1. **Provider layer - configure ECR**
+  ```
+  tln construct aws -- --backend cloud --init --apply --layers provider --state project,provider
+  ```
+  2. **Group layer - configure Route53, certificate & validation. You will need to update the DNS nameservers on your registrar's side**
+  ```
+  tln construct aws -- --backend cloud --init --apply --layers group --state project,provider,group
+  ```
+  3. **Network and Managed layers - configure VPC, Bastion, K8s**
+  ```
+  tln construct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
+  ```
 
 * At this point you have ready to use cloud infrastructure with K8s and secure access via bastion
 
-  (1) Initiate sshuttle connection to the cluster via bastion (first terminal)
-  ```
-  tln connect aws
-  ```
-  (2) Open shell with necessary environment variables (second terminal)
-  ```
-  tln shell aws
-  ```
-  (3) Check cluster (second terminal)
-  ```
-  kubectl get pods -A
-  ```
-  (4) Close shell (second terminal)
-  ```
-  ^D
-  ```
-  (5) Close sshuttle connection (first terminal)
-  ```
-  ^C
-  ```
+  1. **Initiate sshuttle connection to the cluster via bastion (first terminal)**
+  ```
+  tln connect aws
+  ```
+  2. **Open shell with necessary environment variables (second terminal)**
+  ```
+  tln shell aws
+  ```
+  3. **Check cluster (second terminal)**
+  ```
+  kubectl get pods -A
+  ```
+  4. **Close shell (second terminal)**
+  ```
+  ^D
+  ```
+  5. **Close sshuttle connection (first terminal)**
+  ```
+  ^C
+  ```
 
 * You can go extra mile and deploy your SaaS-specific resources
 
-  (1) Start secure sshuttle connection (first terminal)
-  ```
-  tln connect aws
-  ```
-  (2) Deploy App layer - Nginx ingress, Postgres DBs, DNS records (second terminal)
-  ```
-  tln construct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
-  ```
-  (3) You can check endpoints availability in browser https://myprojecy.io & https://api.myproject.io
-
-* Now you can deconstruct all layers and free all Cloud resources
-
-  (1) Undeploy App layer (second terminal)
-  ```
-  tln deconstruct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
-  ```
-  (2) Close sshuttle connection (first terminal)
-  ```
-  ^C
-  ```
-  (3,4) Delete Network and Managed layers
-  ```
-  tln deconstruct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
-  ```
-  (5) Delete Groupr layer
-  ```
-  tln deconstruct aws -- --backend cloud --init --apply --layers group --state project,provider,group
-  ```
-  (6) Delete Provider layer
-  ```
-  tln deconstruct aws -- --backend cloud --init --apply --layers provider --state project,provider
-  ```
-
-### Digital Ocean
-* Create **do/.env** file using **do/.env.template** as an example
+  1. **Start secure sshuttle connection (first terminal)**
   ```
-  DIGITALOCEAN_TOKEN=
-
-  TF_VAR_do_region=nyc3
-  TF_VAR_do_k8s_version=1.28.2-do.0
-  TF_VAR_do_k8s_nodes_min=1
-  TF_VAR_do_k8s_nodes_max=2
-  TF_VAR_do_k8s_nodes_size=s-2vcpu-2gb
+  tln connect aws
   ```
-* Install dependencies
-  ```
-  tln install do --depends
-  ```
-* Construct DO Dev infrastructure instance
-  ```
-  tln construct do -- --backend cloud --init --plan --apply
-  ```
-* Verify access to the k8s cluster and install/uninstall ingress
-  * Create ssh session
+  2. **Deploy App layer - configure Nginx ingress, Postgres DBs, DNS records (second terminal)**
   ```
-  tln shell do
+  tln construct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
   ```
+  3. **You can check endpoint availability in a browser: https://myproject.io & https://api.myproject.io**
+
+* Now you can deconstruct all layers and free all Cloud resources
+
+  1. **Undeploy App layer (second terminal)**
   ```
-  tln nginx-ingress-install@k8s -- --ver 4.8.3
+  tln deconstruct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
   ```
+  2. **Close sshuttle connection (first terminal)**
   ```
-  kubectl get pods --all-namespaces
+  ^C
   ```
+  3. **Delete Network and Managed layers**
   ```
-  tln nginx-ingress-status@k8s
+  tln deconstruct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
   ```
-  * Use IP address from command output above to check access to the cluster using browser/curl
-  * Uninstall Ingress
+  4. **Delete Group layer**
   ```
-  tln nginx-ingress-uninstall@k8s
+  tln deconstruct aws -- --backend cloud --init --apply --layers group --state project,provider,group
   ```
-  * Close ssh session
+  5. **Delete Provider layer**
   ```
-  ^d
+  tln deconstruct aws -- --backend cloud --init --apply --layers provider --state project,provider
   ```
-* Deconstruct DO Dev infrastructure instance
-  ```
-  tln deconstruct do -- --backend cloud --plan --apply
-  ```
+
+### Azure
+  In development
+
+### GCP
+  In development
+
 ## Command line options
 General format
 ```
diff --git a/aws/app/dns.tf b/aws/app/dns.tf
new file mode 100644
index 0000000..5d62b95
--- /dev/null
+++ b/aws/app/dns.tf
@@ -0,0 +1,45 @@
+
+locals {
+  subdomain_name = "${var.env_id}.${var.domain_name}"
+}
+
+resource "aws_route53_zone" "secondary" {
+  name = local.subdomain_name
+  tags = module.shared.tags
+}
+
+resource "aws_route53_record" "ns" {
+  zone_id = data.aws_route53_zone.primary.zone_id
+  name    = local.subdomain_name
+  type    = "NS"
+  ttl     = "30"
+  records = aws_route53_zone.secondary.name_servers
+}
+
+module "secondary_certificate" {
+  source  = "terraform-aws-modules/acm/aws"
+  version = "4.5.0"
+
+  domain_name               = local.subdomain_name
+  subject_alternative_names = ["*.${local.subdomain_name}"]
+  zone_id                   = aws_route53_zone.secondary.zone_id
+
+  wait_for_validation = true
+}
+
+resource "aws_route53_record" "record" {
+  for_each = toset(split(",", var.dns_records))
+  zone_id  = var.use_primary_domain ? data.aws_route53_zone.primary.zone_id : aws_route53_zone.secondary.zone_id
+  name     = each.key
+  type     = "A"
+
+  alias {
+    name                   = data.aws_lb.primary.dns_name
+    zone_id                = data.aws_lb.primary.zone_id
+    evaluate_target_health = false
+  }
+  depends_on = [
+    helm_release.nginx,
+    data.aws_lb.primary
+  ]
+}
diff --git a/aws/app/main.tf b/aws/app/main.tf
index 9267ad2..2e2620b 100644
--- a/aws/app/main.tf
+++ b/aws/app/main.tf
@@ -5,20 +5,3 @@ module "shared" {
   group_id = var.group_id
   env_id   = var.env_id
 }
-
-resource "aws_route53_record" "record" {
-  for_each = toset(split(",", var.dns_records))
-  zone_id  = data.aws_route53_zone.primary.zone_id
-  name     = each.key
-  type     = "A"
-
-  alias {
-    name                   = data.aws_lb.primary.dns_name
-    zone_id                = data.aws_lb.primary.zone_id
-    evaluate_target_health = false
-  }
-  depends_on = [
-    helm_release.nginx,
-    data.aws_lb.primary
-  ]
-}
diff --git a/aws/app/nginx.tf b/aws/app/nginx.tf
index dfabab2..02db8bf 100644
--- a/aws/app/nginx.tf
+++ b/aws/app/nginx.tf
@@ -29,7 +29,7 @@ resource "helm_release" "nginx" {
   }
   set {
     name  = "controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-ssl-cert"
-    value = data.aws_acm_certificate.primary.arn
+    value = var.use_primary_domain ? data.aws_acm_certificate.primary.arn : module.secondary_certificate.acm_certificate_arn
     type  = "string"
   }
diff --git a/aws/app/outputs.tf b/aws/app/outputs.tf
index e69de29..3eedb3c 100644
--- a/aws/app/outputs.tf
+++ b/aws/app/outputs.tf
@@ -0,0 +1,3 @@
+output "use_primary_domain" {
+  value = var.use_primary_domain
+}
diff --git a/aws/app/postgres.tf b/aws/app/postgres.tf
index 7668afd..e67accd 100644
--- a/aws/app/postgres.tf
+++ b/aws/app/postgres.tf
@@ -29,8 +29,8 @@ module "rds_pg" {
   major_engine_version = "15"
   instance_class       = var.rds_pg_db_size
 
-  allocated_storage     = 20
-  max_allocated_storage = 30
+  allocated_storage     = var.rds_pg_db_allocated_storage
+  max_allocated_storage = var.rds_pg_max_allocated_storage
 
   db_name  = "postgres"
   username = "root"
diff --git a/aws/app/variables.tf b/aws/app/variables.tf
index 6a928fd..0102830 100644
--- a/aws/app/variables.tf
+++ b/aws/app/variables.tf
@@ -17,10 +17,19 @@ variable "domain_name" {
 variable "dns_records" {
   type = string
 }
+variable "use_primary_domain" {
+  type = bool
+}
 variable "rds_pg_db_size" {
   type = string
 }
+variable "rds_pg_db_allocated_storage" {
+  type = string
+}
+variable "rds_pg_max_allocated_storage" {
+  type = string
+}
 variable "databases" {
   description = "A map of databases, their owners and passwords"
   type = map(object({
diff --git a/aws/group/.terraform.lock.hcl b/aws/group/.terraform.lock.hcl
index e470e4d..e3cffee 100644
--- a/aws/group/.terraform.lock.hcl
+++ b/aws/group/.terraform.lock.hcl
@@ -1,28 +1,6 @@
 # This file is maintained automatically by "terraform init".
 # Manual edits may be lost in future updates.
 
-provider "registry.terraform.io/cloudposse/awsutils" {
-  version     = "0.19.1"
-  constraints = ">= 0.11.0"
-  hashes = [
-    "h1:POenlHr7u+rJ0WgYMtt0DAQgHlWXoTWAWNMt4s5tRfg=",
-    "zh:2f2a6ea60b791dca4913cca800928380f169c0c60bd4773bb23518cd400364e0",
-    "zh:45a7ce56b6b127a8f37b250c97a825055e4ee39f809de1d2b80ec2a65808e2dd",
-    "zh:64887d608a21d363754efe7618b89816d6fecbf6c46e0ad7e87c963ac8859e10",
-    "zh:8868a2524577c4fd0c786b569fa6279fb00c91f53e3765a9ed237f3dece4de6b",
-    "zh:8a21d2e2c8ae8169c68b9182c7d7c7704c65f6a5dfe438f9231d6daec8b13095",
-    "zh:ab374c7a75e98468437a3f41e6c882d9f51cb2b4b5f822b0269926b50023adab",
-    "zh:c323b7d5190f891257e3700f9d37ffbb94c2a8a14619a77a36ed38c8c3cb347e",
-    "zh:c512756ed8f02336647b9855254a2b3bf6935dadb9465a8d2a529cf120eb13dd",
-    "zh:d9bcf7861b3e9704ac40615e3900993fbce205f64d57f472f606f085362972b9",
-    "zh:dd3cf0e68c52a34ce7e12cc39c6f42fda092ef3a870f3c119bdced92bfe476e7",
-    "zh:df2907241946bd88be11c12a94c4987bf8e450bea8f4affdd148445824ebd065",
-    "zh:e50f326ee7e6687f1ef4c9e6867d6a713cef7a687521db74f8fdc612aa51fccb",
-    "zh:e7ba24f44af3db14aa1cc181643beb8484cb5e7eb56708bf1d728c92fa3f29f7",
-    "zh:e85c460e606e1c1abf689ea2588c55357f42431bd7d346fd1d59cae6dd5c9a83",
-  ]
-}
-
 provider "registry.terraform.io/hashicorp/aws" {
   version     = "5.43.0"
   constraints = ">= 4.40.0, 5.43.0"
   hashes = [
     "zh:fecbcbd63111c9518de261bcb37482cb06ee149e7298f567d45b2a55674faa75",
   ]
 }
-
-provider "registry.terraform.io/hashicorp/local" {
-  version = "2.5.1"
-  hashes = [
-    "h1:tjcGlQAFA0kmQ4vKkIPPUC4it1UYxLbg4YvHOWRAJHA=",
-    "zh:0af29ce2b7b5712319bf6424cb58d13b852bf9a777011a545fac99c7fdcdf561",
-    "zh:126063ea0d79dad1f68fa4e4d556793c0108ce278034f101d1dbbb2463924561",
-    "zh:196bfb49086f22fd4db46033e01655b0e5e036a5582d250412cc690fa7995de5",
-    "zh:37c92ec084d059d37d6cffdb683ccf68e3a5f8d2eb69dd73c8e43ad003ef8d24",
"zh:4269f01a98513651ad66763c16b268f4c2da76cc892ccfd54b401fff6cc11667", - "zh:51904350b9c728f963eef0c28f1d43e73d010333133eb7f30999a8fb6a0cc3d8", - "zh:73a66611359b83d0c3fcba2984610273f7954002febb8a57242bbb86d967b635", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7ae387993a92bcc379063229b3cce8af7eaf082dd9306598fcd42352994d2de0", - "zh:9e0f365f807b088646db6e4a8d4b188129d9ebdbcf2568c8ab33bddd1b82c867", - "zh:b5263acbd8ae51c9cbffa79743fbcadcb7908057c87eb22fd9048268056efbc4", - "zh:dfcd88ac5f13c0d04e24be00b686d069b4879cc4add1b7b1a8ae545783d97520", - ] -} - -provider "registry.terraform.io/hashicorp/null" { - version = "3.2.2" - constraints = ">= 2.0.0" - hashes = [ - "h1:vWAsYRd7MjYr3adj8BVKRohVfHpWQdvkIwUQ2Jf5FVM=", - "zh:3248aae6a2198f3ec8394218d05bd5e42be59f43a3a7c0b71c66ec0df08b69e7", - "zh:32b1aaa1c3013d33c245493f4a65465eab9436b454d250102729321a44c8ab9a", - "zh:38eff7e470acb48f66380a73a5c7cdd76cc9b9c9ba9a7249c7991488abe22fe3", - "zh:4c2f1faee67af104f5f9e711c4574ff4d298afaa8a420680b0cb55d7bbc65606", - "zh:544b33b757c0b954dbb87db83a5ad921edd61f02f1dc86c6186a5ea86465b546", - "zh:696cf785090e1e8cf1587499516b0494f47413b43cb99877ad97f5d0de3dc539", - "zh:6e301f34757b5d265ae44467d95306d61bef5e41930be1365f5a8dcf80f59452", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:913a929070c819e59e94bb37a2a253c228f83921136ff4a7aa1a178c7cce5422", - "zh:aa9015926cd152425dbf86d1abdbc74bfe0e1ba3d26b3db35051d7b9ca9f72ae", - "zh:bb04798b016e1e1d49bcc76d62c53b56c88c63d6f2dfe38821afef17c416a0e1", - "zh:c23084e1b23577de22603cff752e59128d83cfecc2e6819edadd8cf7a10af11e", - ] -} diff --git a/aws/group/cognito.tf b/aws/group/cognito.tf index 6b1a8c3..55f285d 100644 --- a/aws/group/cognito.tf +++ b/aws/group/cognito.tf @@ -1,3 +1,5 @@ +// Uncomment the following code if you want to enable Cognito User Pool + /* locals { api_base_url = "http://localhost" @@ -21,8 +23,9 @@ module "cognito_user_pool" { email_configuration = { email_sending_account = "DEVELOPER" - reply_to_email_address = "no-reply@${var.domain_name}" + reply_to_email_address = "no-reply@no-reply.${var.domain_name}" source_arn = module.ses.ses_domain_identity_arn + from_email_address = "no-reply@no-reply.${var.domain_name}" } string_schemas = [ diff --git a/aws/group/jump_server.tf b/aws/group/jump_server.tf index 8bd0cab..4dc9c17 100644 --- a/aws/group/jump_server.tf +++ b/aws/group/jump_server.tf @@ -1,3 +1,5 @@ +// Uncomment the following code if you want to enable Jump Server + /* data "aws_ami" "ubuntu" { most_recent = false diff --git a/aws/group/ses.tf b/aws/group/ses.tf index 937c7ad..5ea62ee 100644 --- a/aws/group/ses.tf +++ b/aws/group/ses.tf @@ -1,3 +1,5 @@ +// Uncomment the following code if you want to enable SES (Simple Email Service) + /* module "ses" { source = "cloudposse/ses/aws" diff --git a/whoami.yaml b/whoami.yaml index 560f187..570683e 100644 --- a/whoami.yaml +++ b/whoami.yaml @@ -45,7 +45,7 @@ metadata: nginx.ingress.kubernetes.io/rewrite-target: "/$2" spec: rules: - - host: "" + - host: "${TLN_CLOUDS_WHOAMI_HOST}" http: paths: - path: "/whoami(/|$)(.*)"