Feat/v24.8.1 (#57)
* feat(aws): RDS allocated storage, SES/Cognito from email

* feat(aws): third-level DNS name for environments

* Update README.md
VladyslavKurmaz authored Aug 21, 2024
1 parent d0b9dd5 commit 46d7e5b
Showing 14 changed files with 151 additions and 192 deletions.
3 changes: 3 additions & 0 deletions .env.template
@@ -20,6 +20,9 @@ TF_VAR_tenant_id=

TF_VAR_domain_name=myproject.dev
TF_VAR_dns_records=myproject.dev,api,admin
TF_VAR_use_primary_domain=false

TF_VAR_rds_pg_db_size=db.t3.micro
TF_VAR_rds_pg_db_allocated_storage=20
TF_VAR_rds_pg_max_allocated_storage=30
TF_VAR_databases={ "user" = { owner = "admin", password = "admin" }, "auth" = { owner = "admin", password = "admin" } }
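Terraform maps `TF_VAR_`-prefixed environment variables onto input variables of the same name, so the new entries above feed `var.use_primary_domain` and the RDS storage variables declared in aws/app/variables.tf. A minimal sketch of that mechanism; the bare `terraform plan` call is illustrative only, since the repo normally drives Terraform through `tln construct`:
```
# Picked up by Terraform as var.use_primary_domain, var.rds_pg_db_allocated_storage, var.rds_pg_max_allocated_storage
export TF_VAR_use_primary_domain=false
export TF_VAR_rds_pg_db_allocated_storage=20
export TF_VAR_rds_pg_max_allocated_storage=30
terraform plan   # illustrative; the repo wraps Terraform behind `tln construct`
```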
13 changes: 10 additions & 3 deletions .tln.conf
@@ -158,6 +158,13 @@ module.exports = {
if (env.TLN_CLOUDS_TENANT) {
env.TF_VAR_tenant_id = env.TLN_CLOUDS_TENANT;
}
// test dns name for whoami
const arr = ['api'];
if (env.TF_VAR_use_primary_domain === 'false') {
arr.push(env.TF_VAR_env_id);
}
arr.push(env.TF_VAR_domain_name);
env.TLN_CLOUDS_WHOAMI_HOST = arr.join('.');
},
dotenvs: async (tln) => { if (fs.existsSync('.env')) return ['.env']; else return [] },
inherits: async (tln) => [],
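The host composition added above implements the new third-level DNS scheme for environments. A sketch of the equivalent shell logic, assuming illustrative values TF_VAR_env_id=dev01 and TF_VAR_domain_name=myproject.dev:
```
# Equivalent of the .tln.conf logic above
if [ "$TF_VAR_use_primary_domain" = "false" ]; then
  TLN_CLOUDS_WHOAMI_HOST="api.${TF_VAR_env_id}.${TF_VAR_domain_name}"   # api.dev01.myproject.dev
else
  TLN_CLOUDS_WHOAMI_HOST="api.${TF_VAR_domain_name}"                    # api.myproject.dev
fi
export TLN_CLOUDS_WHOAMI_HOST
```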
@@ -196,7 +203,7 @@ sshuttle --dns${daemon} -vr ${script.env.TLN_CLOUDS_BASTION} 0/0 --ssh-cmd 'ssh$
{ id: 'whoami', builder: async (tln, script) => {
const op = script.env.TLN_CLOUDS_DELETE ? 'delete' : 'apply';
script.set([
`kubectl ${op} -f ${path.join(__dirname, 'whoami.yaml')}`
`envsubst < ${path.join(__dirname, 'whoami.yaml')} | kubectl ${op} -f -`
]);
}
},
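The switch from a plain `kubectl apply` to an `envsubst | kubectl` pipeline lets the whoami manifest pick up the computed host. A hedged sketch of what the pipeline does, assuming whoami.yaml references ${TLN_CLOUDS_WHOAMI_HOST} (for example in an Ingress host rule; the manifest itself is not shown in this diff):
```
# envsubst replaces ${TLN_CLOUDS_WHOAMI_HOST} with the exported value before the manifest reaches kubectl
export TLN_CLOUDS_WHOAMI_HOST=api.dev01.myproject.dev    # illustrative value
envsubst < whoami.yaml | kubectl apply -f -               # or `kubectl delete` when TLN_CLOUDS_DELETE is set
```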
@@ -212,7 +219,7 @@ sshuttle --dns${daemon} -vr ${script.env.TLN_CLOUDS_BASTION} 0/0 --ssh-cmd 'ssh$
tln construct -- --backend cloud${opts} --layers provider --state project,provider
tln construct -- --backend cloud${opts} --layers group --state project,provider,group
tln construct -- --backend cloud${opts} --layers network,managed --state project,provider,group,env,layer
${script.env.TLN_CLOUDS_CI ? 'tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}
${script.env.TLN_CLOUDS_CI ? '#tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}
tln construct -- --backend cloud${opts} --layers app --state project,provider,group,env,layer
`].concat(
(script.env.TF_VAR_tenant_id) ? [
@@ -224,7 +231,7 @@ tln construct -- --backend cloud${opts} --layers app --state project,provider,gr
{ id: 'down', builder: async (tln, script) => {
const opts = getTerraformOpts(script.env);
script.set([
`${script.env.TLN_CLOUDS_CI ? 'tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}`,
`${script.env.TLN_CLOUDS_CI ? '#tln sshuttle -- --bastion \$(tln get-bastion) --deamon' : ''}`,
].concat((
(script.env.TF_VAR_tenant_id) ? [
`tln deconstruct -- --backend cloud${opts} --layers tenant --state project,provider,group,env,tenant --tenant ${script.env.TF_VAR_tenant_id}`,
175 changes: 69 additions & 106 deletions README.md
@@ -3,11 +3,11 @@
![Infrastructure Instance](ii.png)

## Features
* supports AWS, DO (Azure, GCP - in progress)
* provides Multi-tenancy feature via layers architecture (provider, group, network, managed, app, tenant)
* implements easy-to-construct multiple environment approach, controls by single environment variable - **TF_VAR_env_id**
* Supports AWS (Azure, GCP - in progress)
* Provides Multi-tenancy feature via layers architecture (provider, group, network, managed, app, tenant)
* Implements an easy-to-construct multi-environment approach, controlled by a single environment variable - **TF_VAR_env_id**
* IaC via Terraform and Helm
* supports multiple backend providers - Local, Cloud, PG (S3 - in progress)
* Utilises multiple backend providers - Local, Cloud, PG (S3 - in progress)

## Infrastructure Instance layers
![Infrastructure Instance Layers](layers.png)
@@ -16,7 +16,7 @@
* Install [tln](https://www.npmjs.com/package/tln-cli)
* Go to the **projects** folder from the tln-cli installation above and clone the repository
```
git clone --depth 1 --branch v24.4.0 git@github.com:project-talan/tln-clouds.git && cd tln-clouds
git clone --depth 1 --branch v24.8.1 git@github.com:project-talan/tln-clouds.git && cd tln-clouds
```
> Important<br>
> * Commands below assume that Terraform Cloud is used as the storage for Terraform states<br/>
@@ -36,9 +36,12 @@
TF_VAR_repositories=io.myproject.services.api,io.myproject.web.landing
TF_VAR_domain_name=myproject.io
TF_VAR_dns_records=myproject.io,api
TF_VAR_dns_records=dev01.myproject.io,api
TF_VAR_use_primary_domain=false
TF_VAR_rds_pg_db_size=db.t3.micro
TF_VAR_rds_pg_db_allocated_storage=20
TF_VAR_rds_pg_max_allocated_storage=30
TF_VAR_databases={ "user" = { owner = "admin", password = "admin" }, "auth" = { owner = "admin", password = "admin" } }
```
@@ -49,134 +52,94 @@
AWS_SECRET_ACCESS_KEY=<your_aws_key>
AWS_DEFAULT_REGION=eu-central-1
TF_VAR_aws_k8s_version=1.29
TF_VAR_aws_k8s_version=1.30
TF_VAR_aws_k8s_nodes_min=1
TF_VAR_aws_k8s_nodes_desired=2
TF_VAR_aws_k8s_nodes_max=3
TF_VAR_aws_k8s_nodes_size=t3a.medium
TF_VAR_aws_k8s_nodes_disk=50
```
* Install dependencies
* **Install dependencies**
```
tln install aws --depends
```
* Construct four AWS Dev infrastructure instance layers
* Construct four AWS Infrastructure Instance layers
(1) Provider layer - ERC
```
tln construct aws -- --backend cloud --init --apply --layers provider --state project,provider
```
(2) Groupr layer - domain name, certificate & validation. You will need to modify DNS nameservers at your registrar side
```
tln construct aws -- --backend cloud --init --apply --layers group --state project,provider,group
```
(3,4) Network and Managed layers - VPC, Bastion, K8s
```
tln construct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
```
1. **Provider layer - configure ERC**
```
tln construct aws -- --backend cloud --init --apply --layers provider --state project,provider
```
2. **Group layer - configure Route53, certificate & validation. You will need to update the DNS nameservers at your registrar**
```
tln construct aws -- --backend cloud --init --apply --layers group --state project,provider,group
```
3. **Network and Managed layers - configure VPC, Bastion, K8s**
```
tln construct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
```
* At this point you have a ready-to-use cloud infrastructure with K8s and secure access via the bastion
(1) Initiate sshuttle connection to the cluster via bastion (first terminal)
```
tln connect aws
```
(2) Open shell with necessary environment variables (second terminal)
```
tln shell aws
```
(3) Check cluster (second terminal)
```
kubectl get pods -A
```
(4) Close shell (second terminal)
```
^D
```
(5) Close sshuttle connection (first terminal)
```
^C
```
1. **Initiate sshuttle connection to the cluster via bastion (first terminal)**
```
tln connect aws
```
2. **Open shell with necessary environment variables (second terminal)**
```
tln shell aws
```
3. **Check cluster (second terminal)**
```
kubectl get pods -A
```
4. **Close shell (second terminal)**
```
^D
```
5. **Close sshuttle connection (first terminal)**
```
^C
```
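For context, `tln connect aws` builds on sshuttle (the sshuttle invocation appears in the .tln.conf hunks above): it tunnels all traffic, including DNS, from your machine through the bastion host over SSH. A rough sketch of the underlying command; the key path is a placeholder and TLN_CLOUDS_BASTION is assumed to hold the bastion's user@host:
```
# Roughly what the connect step runs under the hood
sshuttle --dns -vr "$TLN_CLOUDS_BASTION" 0/0 --ssh-cmd 'ssh -i <path-to-key>'
```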
* You can go the extra mile and deploy your SaaS-specific resources
(1) Start secure sshuttle connection (first terminal)
```
tln connect aws
```
(2) Deploy App layer - Nginx ingress, Postgres DBs, DNS records (second terminal)
```
tln construct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
```
(3) You can check endpoints availability in browser https://myprojecy.io & https://api.myproject.io
* Now you can deconstruct all layers and free all Cloud resources
(1) Undeploy App layer (second terminal)
```
tln deconstruct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
```
(2) Close sshuttle connection (first terminal)
```
^C
```
(3,4) Delete Network and Managed layers
```
tln deconstruct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
```
(5) Delete Groupr layer
```
tln deconstruct aws -- --backend cloud --init --apply --layers group --state project,provider,group
```
(6) Delete Provider layer
```
tln deconstruct aws -- --backend cloud --init --apply --layers provider --state project,provider
```
### Digital Ocean
* Create **do/.env** file using **do/.env.template** as an example
1. **Start secure sshuttle connection (first terminal)**
```
DIGITALOCEAN_TOKEN=<your_do_token>
TF_VAR_do_region=nyc3
TF_VAR_do_k8s_version=1.28.2-do.0
TF_VAR_do_k8s_nodes_min=1
TF_VAR_do_k8s_nodes_max=2
TF_VAR_do_k8s_nodes_size=s-2vcpu-2gb
tln connect aws
```
* Install dependencies
```
tln install do --depends
```
* Construct DO Dev infrastructure instance
```
tln construct do -- --backend cloud --init --plan --apply
```
* Verify access to the k8s cluster and install/uninstall ingress
* Create ssh session
2. **Deploy App layer - configure Nginx ingress, Postgres DBs, DNS records (second terminal)**
```
tln shell do
tln construct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
```
3. **You can check endpoint availability in the browser at https://myproject.io & https://api.myproject.io (see the curl sketch below)**
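A quick way to verify both endpoints from the shell once DNS has propagated and the App layer is up; the domain names are the illustrative ones from aws/.env:
```
curl -sSI https://myproject.io | head -n 1        # expect an HTTP status line
curl -sSI https://api.myproject.io | head -n 1
```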
* Now you can deconstruct all layers and free all Cloud resources
1. **Undeploy App layer (second terminal)**
```
tln nginx-ingress-install@k8s -- --ver 4.8.3
tln deconstruct aws -- --backend cloud --init --apply --layers app --state project,provider,group,env,layer
```
2. **Close sshuttle connection (first terminal)**
```
kubectl get pods --all-namespaces
^C
```
3. **Delete Network and Managed layers**
```
tln nginx-ingress-status@k8s
tln deconstruct aws -- --backend cloud --init --apply --layers network,managed --state project,provider,group,env,layer
```
* Use IP address from command output above to check access to the cluster using browser/curl
* Uninstall Ingress
4. **Delete Group layer**
```
tln nginx-ingress-uninstall@k8s
tln deconstruct aws -- --backend cloud --init --apply --layers group --state project,provider,group
```
* Close ssh session
5. **Delete Provider layer**
```
^d
tln deconstruct aws -- --backend cloud --init --apply --layers provider --state project,provider
```
* Deconstruct DO Dev infrastructure instance
```
tln deconstruct do -- --backend cloud --plan --apply
```
### Azure
In development
### GCP
In development
## Command line options
General format
```
45 changes: 45 additions & 0 deletions aws/app/dns.tf
@@ -0,0 +1,45 @@

locals {
subdomain_name = "${var.env_id}.${var.domain_name}"
}

resource "aws_route53_zone" "secondary" {
name = local.subdomain_name
tags = module.shared.tags
}

resource "aws_route53_record" "ns" {
zone_id = data.aws_route53_zone.primary.zone_id
name = local.subdomain_name
type = "NS"
ttl = "30"
records = aws_route53_zone.secondary.name_servers
}

module "secondary_certificate" {
source = "terraform-aws-modules/acm/aws"
version = "4.5.0"

domain_name = local.subdomain_name
subject_alternative_names = ["*.${local.subdomain_name}"]
zone_id = aws_route53_zone.secondary.zone_id

wait_for_validation = true
}

resource "aws_route53_record" "record" {
for_each = toset(split(",", var.dns_records))
zone_id = var.use_primary_domain ? data.aws_route53_zone.primary.zone_id : aws_route53_zone.secondary.zone_id
name = each.key
type = "A"

alias {
name = data.aws_lb.primary.dns_name
zone_id = data.aws_lb.primary.zone_id
evaluate_target_health = false
}
depends_on = [
helm_release.nginx,
data.aws_lb.primary
]
}
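A hedged way to confirm the delegation and records created above once the App layer is applied; dev01 and myproject.io are illustrative env_id and domain_name values:
```
# NS delegation written into the primary zone by aws_route53_record.ns
dig +short NS dev01.myproject.io
# A/alias record pointing at the ingress load balancer (data.aws_lb.primary)
dig +short A api.dev01.myproject.io
```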
17 changes: 0 additions & 17 deletions aws/app/main.tf
@@ -5,20 +5,3 @@ module "shared" {
group_id = var.group_id
env_id = var.env_id
}

resource "aws_route53_record" "record" {
for_each = toset(split(",", var.dns_records))
zone_id = data.aws_route53_zone.primary.zone_id
name = each.key
type = "A"

alias {
name = data.aws_lb.primary.dns_name
zone_id = data.aws_lb.primary.zone_id
evaluate_target_health = false
}
depends_on = [
helm_release.nginx,
data.aws_lb.primary
]
}
2 changes: 1 addition & 1 deletion aws/app/nginx.tf
@@ -29,7 +29,7 @@ resource "helm_release" "nginx" {
}
set {
name = "controller.service.annotations.service\\.beta\\.kubernetes\\.io/aws-load-balancer-ssl-cert"
value = data.aws_acm_certificate.primary.arn
value = var.use_primary_domain ? data.aws_acm_certificate.primary.arn : module.secondary_certificate.acm_certificate_arn
type = "string"
}

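With this change the load balancer certificate follows `use_primary_domain`: either the primary-domain certificate or the per-environment wildcard certificate from aws/app/dns.tf. A hedged check of which ARN actually landed on the controller's Service; the namespace and Service names are placeholders, since they depend on how helm_release.nginx is configured outside this hunk:
```
# Prints the ACM certificate ARN annotated onto the ingress controller's load balancer Service
kubectl -n <ingress-namespace> get svc <nginx-controller-service> \
  -o jsonpath='{.metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-ssl-cert}'
```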
3 changes: 3 additions & 0 deletions aws/app/outputs.tf
@@ -0,0 +1,3 @@
output "use_primary_domain" {
value = var.use_primary_domain
}
4 changes: 2 additions & 2 deletions aws/app/postgres.tf
@@ -29,8 +29,8 @@ module "rds_pg" {
major_engine_version = "15"
instance_class = var.rds_pg_db_size

allocated_storage = 20
max_allocated_storage = 30
allocated_storage = var.rds_pg_db_allocated_storage
max_allocated_storage = var.rds_pg_max_allocated_storage

db_name = "postgres"
username = "root"
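Here `allocated_storage` is the initial volume size, while `max_allocated_storage` enables RDS storage autoscaling up to that ceiling. A hedged post-apply check with the AWS CLI; the DB identifier comes from module.rds_pg and is a placeholder here:
```
# Shows [initial size, autoscaling ceiling] in GiB for the created instance
aws rds describe-db-instances --db-instance-identifier <db-identifier> \
  --query 'DBInstances[0].[AllocatedStorage,MaxAllocatedStorage]'
```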
9 changes: 9 additions & 0 deletions aws/app/variables.tf
@@ -17,10 +17,19 @@ variable "domain_name" {
variable "dns_records" {
type = string
}
variable "use_primary_domain" {
type = bool
}

variable "rds_pg_db_size" {
type = string
}
variable "rds_pg_db_allocated_storage" {
type = string
}
variable "rds_pg_max_allocated_storage" {
type = string
}
variable "databases" {
description = "A map of databases, their owners and passwords"
type = map(object({
