diff --git a/.github/workflows/automatic.yml b/.github/workflows/automatic.yml index 68ff24d..c255fe9 100644 --- a/.github/workflows/automatic.yml +++ b/.github/workflows/automatic.yml @@ -78,7 +78,7 @@ jobs: id: plan run: | set -o pipefail -ex - terraform plan -compact-warnings -no-color -input=false -lock-timeout=5m -out tfplan 2>&1 \ + terraform plan -compact-warnings -no-color -input=false -lock=false -out tfplan 2>&1 \ | tee terraform_log terraform show -json tfplan > tfplan.json @@ -154,7 +154,7 @@ jobs: id: plan-cost run: | set -o pipefail -ex - terraform plan -compact-warnings -no-color -input=false -lock-timeout=5m -out tfplan-cost 2>&1 + terraform plan -compact-warnings -no-color -input=false -lock=false -out tfplan-cost 2>&1 terraform show -json tfplan-cost > tfplan-cost.json - uses: overmindtech/cost-signals-action@v1 diff --git a/modules/scenarios/memory-optimization/networking.tf b/modules/scenarios/memory-optimization/networking.tf index 488c826..4f0528f 100644 --- a/modules/scenarios/memory-optimization/networking.tf +++ b/modules/scenarios/memory-optimization/networking.tf @@ -58,6 +58,34 @@ resource "aws_lb_target_group" "app" { }) } +# Blackhole Target Group - Empty target group for DNS outage simulation +resource "aws_lb_target_group" "blackhole" { + count = var.enabled ? 1 : 0 + name = "${local.name_prefix}-tg-blackhole" + port = var.application_port + protocol = "HTTP" + vpc_id = local.vpc_id + target_type = "ip" + + health_check { + enabled = true + healthy_threshold = 5 + unhealthy_threshold = 2 + timeout = 5 + interval = 60 + path = "/" + matcher = "200" + port = "traffic-port" + protocol = "HTTP" + } + + tags = merge(local.common_tags, { + Name = "${local.name_prefix}-tg-blackhole" + Purpose = "risk-test" + Mode = "blackhole" + }) +} + # ALB Listener resource "aws_lb_listener" "app" { count = var.enabled ? 
1 : 0 @@ -67,12 +95,7 @@ resource "aws_lb_listener" "app" { default_action { type = "forward" - - forward { - target_group { - arn = aws_lb_target_group.app[0].arn - } - } + target_group_arn = aws_lb_target_group.blackhole[0].arn } tags = merge(local.common_tags, { diff --git a/modules/scenarios/memory-optimization/outputs.tf b/modules/scenarios/memory-optimization/outputs.tf index 7ea771e..d38bc6e 100644 --- a/modules/scenarios/memory-optimization/outputs.tf +++ b/modules/scenarios/memory-optimization/outputs.tf @@ -7,6 +7,16 @@ output "alb_url" { value = var.enabled ? "http://${aws_lb.app[0].dns_name}" : null } +output "alb_dns_name" { + description = "DNS name of the ALB" + value = var.enabled ? aws_lb.app[0].dns_name : null +} + +output "alb_zone_id" { + description = "Zone ID of the ALB" + value = var.enabled ? aws_lb.app[0].zone_id : null +} + output "demo_status" { description = "Object showing current vs required memory, cost calculations, and risk assessment" value = var.enabled ? { diff --git a/modules/scenarios/route53_blackhole.tf b/modules/scenarios/route53_blackhole.tf new file mode 100644 index 0000000..f31cf87 --- /dev/null +++ b/modules/scenarios/route53_blackhole.tf @@ -0,0 +1,21 @@ +# Route53 DNS record for blackhole scenario testing +# This simulates DNS endpoint going dark by pointing to ALB with empty target group +# No failover, no health checks - mimics AWS DNS outage scenario + +resource "aws_route53_record" "blackhole" { + count = var.enable_memory_optimization_demo ? 
1 : 0 + zone_id = data.aws_route53_zone.demo.zone_id + name = "blackhole-${var.example_env}.${data.aws_route53_zone.demo.name}" + type = "A" + + alias { + name = module.memory_optimization.alb_dns_name + zone_id = module.memory_optimization.alb_zone_id + evaluate_target_health = false + } + + # Alias records take no TTL argument (it conflicts with the alias block); Route53 serves them with the ALB's own TTL + # evaluate_target_health = false: Route53 keeps answering even when all targets are unhealthy, so no failover capability + # No health check evaluation - mimics DNS endpoint going dark +} +