
Commit 7c102dc

Merge pull request #5 from aws-samples/fixes-jan25
Fixes jan25
2 parents b1de27f + d42e3b6 commit 7c102dc

8 files changed: +39 -23 lines

lib/alb_controller_iam_policy.json

Lines changed: 2 additions & 1 deletion
@@ -38,7 +38,8 @@
         "elasticloadbalancing:DescribeTargetGroups",
         "elasticloadbalancing:DescribeTargetGroupAttributes",
         "elasticloadbalancing:DescribeTargetHealth",
-        "elasticloadbalancing:DescribeTags"
+        "elasticloadbalancing:DescribeTags",
+        "elasticloadbalancing:DescribeListenerAttributes"
       ],
       "Resource": "*"
     },
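
Note: the added elasticloadbalancing:DescribeListenerAttributes action appears in the reference IAM policy of recent AWS Load Balancer Controller releases. A quick way to exercise the permission from the CLI is sketched below; the listener ARN is a placeholder you would substitute:

aws elbv2 describe-listener-attributes --listener-arn <listener-arn>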

lib/posit-db.ts

Lines changed: 8 additions & 2 deletions
@@ -24,6 +24,12 @@ export class DbStack extends cdk.NestedStack {
         reason: 'Adds complexity and will break the solution without Kube restart. As a solution starter this is not required.'
       },
     ])
+    NagSuppressions.addStackSuppressions(this, [
+      {
+        id: 'AwsSolutions-RDS10',
+        reason: 'Conflicts with stack deletion.'
+      },
+    ])
     NagSuppressions.addStackSuppressions(this, [
       {
         id: 'AwsSolutions-RDS6',
@@ -52,8 +58,8 @@ export class DbStack extends cdk.NestedStack {
         instanceType: ec2.InstanceType.of(ec2.InstanceClass.R6G, ec2.InstanceSize.XLARGE),
       }),
       vpc: props.vpc,
-      deletionProtection: true,
-      removalPolicy: cdk.RemovalPolicy.RETAIN,
+      deletionProtection: false,
+      removalPolicy: cdk.RemovalPolicy.DESTROY,
       securityGroups: [this.clusterSg]
     })

lib/posit-eks.ts

Lines changed: 1 addition & 2 deletions
@@ -54,7 +54,7 @@ export class EksStack extends cdk.NestedStack {
         subnetIds: props.vpc.privateSubnets.map(subnet => subnet.subnetId),
         endpointPrivateAccess: true
       },
-      version: '1.29', // eks.KubernetesVersion.V1_28.version
+      version: '1.30',
      logging: {
        clusterLogging: {
          enabledTypes: [
@@ -82,7 +82,6 @@ export class EksStack extends cdk.NestedStack {
       assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'),
       managedPolicies: [
         iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSWorkerNodePolicy'),
-        // needed at first, otherwise node group doesn't join cluster
         iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKS_CNI_Policy'),
         iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEC2ContainerRegistryReadOnly'),
         iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonSSMManagedInstanceCore'),
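
Note: to confirm the control-plane bump after a deploy, a minimal check (a sketch; it assumes the same EKS_CLUSTER_NAME the repo's scripts read from .env):

aws eks describe-cluster --name "$EKS_CLUSTER_NAME" --query 'cluster.version' --output text   # expect "1.30"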

package.json

Lines changed: 5 additions & 5 deletions
@@ -14,8 +14,8 @@
     "@types/jest": "^29.5.8",
     "@types/node": "20.9.1",
     "@typescript-eslint/eslint-plugin": "^6.21.0",
-    "aws-cdk": "2.126.0",
-    "cdk8s-cli": "^2.198.45",
+    "aws-cdk": "2.173.1",
+    "cdk8s-cli": "^2.198.248",
     "eslint": "^8.56.0",
     "eslint-config-standard-with-typescript": "^43.0.1",
     "eslint-plugin-import": "^2.29.1",
@@ -27,10 +27,10 @@
     "typescript": "^5.3.3"
   },
   "dependencies": {
-    "aws-cdk-lib": "2.126.0",
-    "cdk-nag": "^2.28.149",
+    "aws-cdk-lib": "2.173.1",
+    "cdk-nag": "^2.34.23",
     "cdk8s": "^2.68.35",
-    "cdk8s-plus-25": "^2.22.75",
+    "cdk8s-plus-25": "^2.22.2",
     "constructs": "^10.0.0",
     "dotenv": "^16.4.1",
     "source-map-support": "^0.5.21"

scripts/cert-install.sh

Lines changed: 6 additions & 6 deletions
@@ -4,7 +4,7 @@ source "./scripts/utils.sh"
 # Define your domain
 export_env_from_file "./.env"
 if ! aws eks update-kubeconfig --name $EKS_CLUSTER_NAME; then exit; fi
-DOMAIN=$(kubectl get ingress traefik -n traefik -o json | jq -r ".status.loadBalancer.ingress[0].hostname")
+export LB_NAME="${EKS_CLUSTER_NAME}-alb"

 # Check if ACM certificate exists for the domain
 certificate_arn=$(aws acm list-certificates --query "CertificateSummaryList[?DomainName=='$DOMAIN'].CertificateArn" --output text)
@@ -13,18 +13,18 @@ if [ -n "$certificate_arn" ]; then
   echo "Certificate already exists for $DOMAIN with ARN: $certificate_arn"
 else
   echo "Certificate doesn't exist for $DOMAIN. Generating one..."
-  openssl genrsa -out "$DOMAIN.key" 2048
-  openssl req -new -key "$DOMAIN.key" -out "$DOMAIN.csr" -subj "/CN=$DOMAIN"
-  openssl x509 -req -days 365 -in "$DOMAIN.csr" -signkey "$DOMAIN.key" -out "$DOMAIN.crt"
-  aws acm import-certificate --certificate fileb://"$DOMAIN.crt" --private-key fileb://"$DOMAIN.key"
+  openssl genrsa -out "$LB_NAME.key" 2048
+  openssl req -new -key "$LB_NAME.key" -out "$LB_NAME.csr" -subj "/CN=$DOMAIN"
+  openssl x509 -req -days 365 -in "$LB_NAME.csr" -signkey "$LB_NAME.key" -out "$LB_NAME.crt"
+  aws acm import-certificate --certificate fileb://"$LB_NAME.crt" --private-key fileb://"$LB_NAME.key"

   rm "$DOMAIN.key" "$DOMAIN.csr" "$DOMAIN.crt"
   echo "Certificate has been generated and added to ACM for $DOMAIN"
 fi

 #Get ALB from domain name
 certificate_arn=$(aws acm list-certificates --query "CertificateSummaryList[?DomainName=='$DOMAIN'].CertificateArn" --output text)
-alb=$(aws elbv2 describe-load-balancers --query "LoadBalancers[?DNSName=='$DOMAIN'].LoadBalancerArn" --output text)
+alb=$(aws elbv2 describe-load-balancers --names $LB_NAME --query 'LoadBalancers[0].LoadBalancerArn' --output text)

 #Get data, copy HTTP Rule directly
 https_listener=$(aws elbv2 describe-listeners --load-balancer-arn $alb --query "Listeners[?Protocol=='HTTPS'].ListenerArn" --output text)

scripts/manifests/aws-lb-controller-ingress.yaml

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ metadata:
   annotations:
     alb.ingress.kubernetes.io/backend-protocol: HTTP
     alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]'
-    alb.ingress.kubernetes.io/load-balancer-name: posit-sce-alb
+    alb.ingress.kubernetes.io/load-balancer-name: ${LB_NAME}
     alb.ingress.kubernetes.io/scheme: internet-facing
     alb.ingress.kubernetes.io/success-codes: 200-404
     alb.ingress.kubernetes.io/target-type: instance
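
Note: the ${LB_NAME} placeholder is not valid Kubernetes YAML on its own; as in scripts/posit-install.sh below, the manifest is expected to be expanded with envsubst before it is applied. A minimal sketch, assuming the same .env conventions:

export LB_NAME="${EKS_CLUSTER_NAME}-alb"
envsubst < scripts/manifests/aws-lb-controller-ingress.yaml | kubectl apply -f -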

scripts/manifests/posit-helm-workbench.yaml

Lines changed: 9 additions & 2 deletions
@@ -25,11 +25,11 @@ userCreate: true
 session:
   image:
     repository: rstudio/r-session-complete
-    tagPrefix: ubuntu2204-2023.12.1-402.pro1
+    tag: ubuntu2204

 image:
   repository: rstudio/rstudio-workbench
-  tag: ubuntu2204-2023.12.1-402.pro1
+  tag: ubuntu2204

 service:
   type: ClusterIP
@@ -65,6 +65,8 @@ config:
     # These settings apply to Jupyter Notebook and JupyterLab IDE sessions
     session-cull-minutes: 60
     session-shutdown-minutes: 5
+  vscode.conf:
+    enabled: 1
   profiles:
     launcher.kubernetes.profiles.conf:
       "*":
@@ -76,3 +78,8 @@ config:
         default-mem-mb: "1024"
         max-cpus: "12.0"
         max-mem-mb: "16384"
+prometheus:
+  enabled: false
+  legacy: false
+prometheusExporter:
+  enabled: false

scripts/posit-install.sh

Lines changed: 7 additions & 4 deletions
@@ -27,7 +27,7 @@ load_secrets() {

 # Function to check the status of the load balancer
 check_load_balancer_status() {
-  lb_status=$(aws elbv2 describe-load-balancers --names 'posit-sce-alb' --query 'LoadBalancers[0].State.Code' --output text)
+  lb_status=$(aws elbv2 describe-load-balancers --names $LB_NAME --query 'LoadBalancers[0].State.Code' --output text)

   # Check if the load balancer is active
   if [ "$lb_status" == "active" ]; then
@@ -78,6 +78,7 @@ NC="\e[0m"
 set_defaults
 export_env_from_file "./.env"
 RDS_PARAMS=$(load_secrets $POSTGRES_SECRET)
+export LB_NAME="${EKS_CLUSTER_NAME}-alb"

 # 1.1 Configure EKS Cluster
 printf "${BLUE}------------------------------------------------------${NC} \n"
@@ -119,13 +120,15 @@ kubectl wait --namespace traefik --for=condition=available deployment/traefik --

 # 4. Create Ingress (creates LB)
 envsubst < scripts/manifests/aws-lb-controller-ingress.yaml | kubectl apply -f -
+sleep 5
 check_load_balancer_status

-export LB=$(kubectl get ingress traefik -n traefik -o json | jq -r ".status.loadBalancer.ingress[0].hostname")
+export LB_URL=$(aws elbv2 describe-load-balancers --names $LB_NAME --query 'LoadBalancers[0].DNSName' --output text)
+printf "Loadbalancer DNS: ${LB_URL}"
 if $domain; then
   export DOMAIN=$domain
 else
-  export DOMAIN=$LB
+  export DOMAIN=$LB_URL
 fi

 # 5. Setup POSIT PV's
@@ -164,7 +167,7 @@ printf "${BLUE}------------------------------------------------------${NC} \n"
 printf "${BLUE}Installing & configuring the Workbench helm chart (Max. 30 seconds) ${NC} \n"
 printf "${BLUE}------------------------------------------------------${NC} \n"
 kubectl config set-context --current --namespace=posit-workbench
-envsubst < ./scripts/manifests/posit-helm-workbench.yaml | helm upgrade --install rstudio-workbench-prod rstudio/rstudio-workbench \
+envsubst < ./scripts/manifests/posit-helm-workbench.yaml | helm upgrade --install rstudio-workbench-prod rstudio/rstudio-workbench --version 0.8.9 \
   --set license.key="${PWB_LICENSE}" \
   --set config.secret.'database\.conf'.password="${POSTGRES_PASSWORD}" \
   -f -
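
Note: to confirm the chart pin took effect after the install step, a quick check (a sketch; release name and namespace are taken from the script above):

helm list -n posit-workbench   # the rstudio-workbench-prod release should report chart rstudio-workbench-0.8.9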

0 commit comments