diff --git a/tests/assets/eks-pod-identity/config.yaml b/tests/assets/eks-pod-identity/config.yaml
new file mode 100644
index 00000000..f62bae8b
--- /dev/null
+++ b/tests/assets/eks-pod-identity/config.yaml
@@ -0,0 +1,85 @@
+{{$namespacePrefix := DefaultParam .CL2_NAMESPACE_PREFIX "default"}}
+{{$namespaceCount := DefaultParam .CL2_NAMESPACE_COUNT 1}}
+{{$totalEksPodIdentityPods := DefaultParam .CL2_EKS_POD_IDENTITY_PODS 5000}}
+{{$timeoutEksPodIdentityPodCreation := DefaultParam .CL2_TIMEOUT_EKS_POD_IDENTITY_POD_CREATION "5m"}}
+{{$defaultQps := DefaultParam .CL2_DEFAULT_QPS 500}}
+{{$defaultBurst := DefaultParam .CL2_DEFAULT_BURST 1000}}
+{{$uniformQps := DefaultParam .CL2_UNIFORM_QPS 500}}
+
+{{$SCHEDULER_THROUGHPUT_THRESHOLD := DefaultParam .CL2_SCHEDULER_THROUGHPUT_THRESHOLD 100}}
+
+name: eks-pod-identity
+tuningSets:
+# default is a tuningset that is meant to be used when we don't have any specific requirements on pace of operations.
+- name: default
+  globalQPSLoad:
+    qps: {{$defaultQps}}
+    burst: {{$defaultBurst}}
+- name: UniformQPS
+  qpsLoad:
+    qps: {{$uniformQps}}
+steps:
+- name: Creating eks pod identity measurements
+  measurements:
+  - Identifier: EksPodIdentityPodStartupLatency
+    Method: PodStartupLatency
+    Params:
+      action: start
+      labelSelector: group = eks-pod-identity
+      threshold: 300s
+  - Identifier: EksPodIdentity
+# TODO: Move to SchedulingThroughputPrometheus which requires cl2 prom stack setup as pre-req
+    Method: SchedulingThroughput
+    Params:
+      action: start
+      labelSelector: group = eks-pod-identity
+      measurmentInterval: 1s
+# a pod identity association with (namespace: default, sa: default) is created as prerequisite
+- name: create eks pod identity pods
+  phases:
+  - namespaceRange:
+      min: 1
+      max: {{$namespaceCount}}
+    baseName: {{$namespacePrefix}}
+# spread the total pod count evenly; CL2_EKS_POD_IDENTITY_PODS should be divisible by CL2_NAMESPACE_COUNT so the WaitForRunningPods desiredPodCount below matches what is created
+    replicasPerNamespace: {{DivideInt $totalEksPodIdentityPods $namespaceCount}}
+    tuningSet: UniformQPS
+    objectBundle:
+    - basename: eks-pod-identity
+      objectTemplatePath: pod-default.yaml
+      templateFillMap:
+        Group: eks-pod-identity
+- name: Waiting for eks pod identity pods to be created
+  measurements:
+  - Identifier: WaitForEksPodIdentityPods
+    Method: WaitForRunningPods
+    Params:
+      action: gather
+      timeout: {{$timeoutEksPodIdentityPodCreation}}
+      desiredPodCount: {{$totalEksPodIdentityPods}}
+      labelSelector: group = eks-pod-identity
+- name: Collecting eks pod identity measurements
+  measurements:
+  - Identifier: EksPodIdentityPodStartupLatency
+    Method: PodStartupLatency
+    Params:
+      action: gather
+  - Identifier: EksPodIdentity
+    Method: SchedulingThroughput
+    Params:
+      action: gather
+      enableViolations: true
+      threshold: {{$SCHEDULER_THROUGHPUT_THRESHOLD}}
+- name: Delete eks pod identity pods
+  phases:
+  - namespaceRange:
+      min: 1
+      max: {{$namespaceCount}}
+    baseName: {{$namespacePrefix}}
+    replicasPerNamespace: 0
+    tuningSet: default
+    objectBundle:
+    - basename: eks-pod-identity
+      objectTemplatePath: pod-default.yaml
+      templateFillMap:
+        Group: eks-pod-identity
diff --git a/tests/assets/eks-pod-identity/pia-trust-policy.json b/tests/assets/eks-pod-identity/pia-trust-policy.json
new file mode 100644
index 00000000..49458bfa
--- /dev/null
+++ b/tests/assets/eks-pod-identity/pia-trust-policy.json
@@ -0,0 +1,15 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "beta.pods.eks.aws.internal"
+      },
+      "Action": [
+        "sts:AssumeRole",
+        "sts:TagSession"
+      ]
+    }
+  ]
+}
diff --git a/tests/assets/eks-pod-identity/pod-default.yaml b/tests/assets/eks-pod-identity/pod-default.yaml
new file mode 100644
index 00000000..4816cd87
--- /dev/null
+++ b/tests/assets/eks-pod-identity/pod-default.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: eks-pod-identity-pod-churn-
+  labels:
+    group: {{.Group}}
+spec:
+  containers:
+  - image: registry.k8s.io/pause:3.9
+    name: pause
+  initContainers:
+  - name: app-init
+    image: amazon/aws-cli:latest
+    command: ["/bin/sh"]
+    args: ["-c", "aws sts get-caller-identity"]
diff --git a/tests/tekton-resources/pipelines/eks/awscli-cl2-load-with-addons-slos.yaml b/tests/tekton-resources/pipelines/eks/awscli-cl2-load-with-addons-slos.yaml index e96bb90c..a0be9a2f 100644 --- a/tests/tekton-resources/pipelines/eks/awscli-cl2-load-with-addons-slos.yaml +++ b/tests/tekton-resources/pipelines/eks/awscli-cl2-load-with-addons-slos.yaml @@ -21,6 +21,8 @@ spec: value: $(params.cluster-name)-node-role - name: launch-template-stack-name value: $(params.cluster-name)-launch-template + - name: namespace-count + value: $(params.namespace-count) retries: 10 taskRef: kind: Task @@ -61,6 +63,29 @@ spec: - default: https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks_node_role.json name: node-role-cfn-url type: string + - name: namespace-prefix + default: "default" + description: "The prefix of namespaces for EKS Pod Identity test." + - name: namespace-count + default: "1" + description: "The number of namespaces for EKS Pod Identity test." 
+  - name: pia-trust-policy-url
+    default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/pia-trust-policy.json"
+    type: string
+  - name: pia-test-config-url
+    default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/config.yaml"
+  - name: pia-test-pod-spec-url
+    default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/pod-default.yaml"
+  - name: cl2-eks-pod-identity-pods
+    default: "5000"
+  - name: cl2-default-qps
+    default: "200"
+  - name: cl2-default-burst
+    default: "400"
+  - name: cl2-uniform-qps
+    default: "200"
+  - name: timeout-pia-pod-creation
+    default: "10m"
   tasks:
   - name: slack-notification
     params:
@@ -193,6 +218,66 @@ spec:
     workspaces:
     - name: config
       workspace: config
+  - name: create-pod-identity-association
+    params:
+    - name: cluster-name
+      value: $(params.cluster-name)
+    - name: endpoint
+      value: $(params.endpoint)
+    - name: namespace-prefix
+      value: $(params.namespace-prefix)
+    - name: namespace-count
+      value: $(params.namespace-count)
+    - name: pia-trust-policy-url
+      value: $(params.pia-trust-policy-url)
+    runAfter:
+    - create-mng-nodes
+    taskRef:
+      kind: Task
+      name: awscli-eks-pia-create
+    workspaces:
+    - name: config
+      workspace: config
+  - name: generate-eks-pod-identity
+    params:
+    - name: cl2-eks-pod-identity-pods
+      value: $(params.cl2-eks-pod-identity-pods)
+    - name: cl2-default-qps
+      value: $(params.cl2-default-qps)
+    - name: cl2-default-burst
+      value: $(params.cl2-default-burst)
+    - name: cl2-uniform-qps
+      value: $(params.cl2-uniform-qps)
+    - name: results-bucket
+      value: $(params.results-bucket)
+    - name: nodes
+      value: $(params.desired-nodes)
+    - name: cluster-name
+      value: $(params.cluster-name)
+    - name: namespace-prefix
+      value: $(params.namespace-prefix)
+    - name: namespace-count
+      value: $(params.namespace-count)
+    - name: 
pia-test-config-url + value: $(params.pia-test-config-url) + - name: pia-test-pod-spec-url + value: $(params.pia-test-pod-spec-url) + - name: timeout-pia-pod-creation + value: $(params.timeout-pia-pod-creation) + - name: amp-workspace-id + value: '$(params.amp-workspace-id)' + runAfter: + - create-pod-identity-association + taskRef: + kind: Task + name: load-pod-identity + workspaces: + - name: source + workspace: source + - name: results + workspace: results + - name: config + workspace: config - name: generate params: - name: cluster-name @@ -210,7 +295,7 @@ spec: - name: amp-workspace-id value: $(params.amp-workspace-id) runAfter: - - create-mng-nodes + - generate-eks-pod-identity taskRef: kind: Task name: load-slos @@ -230,11 +315,11 @@ spec: - name: namespace value: $(params.kubernetes-version) runAfter: - - generate + - generate-eks-pod-identity taskRef: kind: Task name: cloudwatch workspaces: - name: source - name: results - - name: config \ No newline at end of file + - name: config diff --git a/tests/tekton-resources/tasks/generators/clusterloader/load-pod-identity.yaml b/tests/tekton-resources/tasks/generators/clusterloader/load-pod-identity.yaml new file mode 100644 index 00000000..d4936f78 --- /dev/null +++ b/tests/tekton-resources/tasks/generators/clusterloader/load-pod-identity.yaml @@ -0,0 +1,177 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: load-pod-identity + namespace: scalability +spec: + description: "clusterloader2 task to run various types of cl2 tests on a given cluster." 
+  params:
+  - name: giturl
+    description: "git url to clone the package"
+    default: https://github.com/kubernetes/perf-tests.git
+  - name: cl2-branch
+    description: "The branch of clusterloader2 you want to use"
+    default: "master"
+  - name: cl2-eks-pod-identity-pods
+    description: "pods for testing eks pod identity service"
+    default: "2000"
+  - name: cl2-default-qps
+    description: "default qps"
+    default: "200"
+  - name: cl2-default-burst
+    description: "default burst"
+    default: "400"
+  - name: cl2-uniform-qps
+    description: "uniform qps"
+    default: "200"
+  - name: nodes
+    description: "number of dataplane nodes to run the load test against"
+    default: "800"
+  - name: results-bucket
+    description: "Results bucket with path of s3 to upload results"
+  - name: region
+    default: "us-west-2"
+    description: The region where the cluster is in.
+  - name: cluster-name
+    description: "The name of the EKS cluster you want to spin"
+  - name: namespace-prefix
+    default: "default"
+    description: "The prefix of namespaces for EKS Pod Identity test."
+  - name: namespace-count
+    default: "1"
+    description: "The number of namespaces for EKS Pod Identity test"
+  - name: pia-test-config-url
+    default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/config.yaml"
+  - name: pia-test-pod-spec-url
+    default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/pod-default.yaml"
+  - name: timeout-pia-pod-creation
+    default: "5m"
+  - name: amp-workspace-id
+    description: The AMP workspace ID where remote write needs to happen.
+    default: ""
+  results:
+  - name: datapoint
+    description: Stores the CL2 result that can be consumed by other tasks (e.g. 
cloudwatch) + - name: s3_result + description: Stores the S3 result path after compute + workspaces: + - name: source + mountPath: /src/k8s.io/ + - name: results + - name: config + mountPath: /config/ + stepTemplate: + env: + - name: KUBECONFIG + value: /config/kubeconfig + steps: + - name: git-clone + image: alpine/git + workingDir: $(workspaces.source.path) + script: | + git clone $(params.giturl) + cd $(workspaces.source.path)/perf-tests/ + git fetch origin --verbose --tags + git checkout $(params.cl2-branch) + git branch + - name: prepare-loadtest + image: golang:1.23 + workingDir: $(workspaces.source.path) + script: | + S3_RESULT_PATH=$(params.results-bucket) + echo $S3_RESULT_PATH > $(results.s3_result.path) + echo "S3 Path: $S3_RESULT_PATH" + cat > "$(workspaces.source.path)/overrides.yaml" <> $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/prometheus-prometheus.yaml + containers: + - name: aws-sigv4-proxy-sidecar + image: public.ecr.aws/aws-observability/aws-sigv4-proxy:1.0 + args: + - --name + - aps + - --region + - $(params.region) + - --host + - aps-workspaces.$(params.region).amazonaws.com + - --port + - :8005 + ports: + - name: aws-sigv4-proxy + containerPort: 8005 + remoteWrite: + - url: http://localhost:8005/workspaces/$(params.amp-workspace-id)/api/v1/remote_write + queueConfig: + capacity: 2500 + maxSamplesPerSend: 1000 + maxShards: 200 + externalLabels: + cluster_name: $(params.cluster-name) + s3_path: $S3_RESULT_PATH + EOF + cat $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/prometheus-prometheus.yaml + cat << EOF >> $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/0prometheus-operator-deployment.yaml + tolerations: + - key: monitoring + operator: Exists + effect: NoSchedule + EOF + cat $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/0prometheus-operator-deployment.yaml + fi + # Building clusterloader2 binary + cd 
$(workspaces.source.path)/perf-tests/clusterloader2/ + GOOS=linux CGO_ENABLED=0 go build -v -o ./clusterloader ./cmd + - name: run-loadtest + image: alpine/k8s:1.30.2 + onError: continue + script: | + #!/bin/bash + if [ -n "$(params.amp-workspace-id)" ]; then + export ENABLE_PROMETHEUS_SERVER=true + export PROMETHEUS_PVC_STORAGE_CLASS=gp2 + export PROMETHEUS_SCRAPE_KUBE_PROXY=false + export PROMETHEUS_KUBE_PROXY_SELECTOR_KEY=k8s-app + export PROMETHEUS_SCRAPE_APISERVER_ONLY=true + fi + + #prepare eks pod identity load test config + mkdir -p $(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity + curl -s $(params.pia-test-config-url) -o $(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity/config.yaml + curl -s $(params.pia-test-pod-spec-url) -o $(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity/pod-default.yaml + cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity/config.yaml + cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity/pod-default.yaml + cd $(workspaces.source.path)/perf-tests/clusterloader2/ + + ENABLE_EXEC_SERVICE=false ./clusterloader --kubeconfig=$KUBECONFIG --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/eks-pod-identity/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2 + exit_code=$? + if [ $exit_code -eq 0 ]; then + echo "1" | tee $(results.datapoint.path) + else + echo "0" | tee $(results.datapoint.path) + fi + exit $exit_code + timeout: 30000s + - name: upload-results + image: amazon/aws-cli + workingDir: $(workspaces.results.path) + script: | + S3_RESULT_PATH=$(cat $(results.s3_result.path)) + echo "S3 Path: $S3_RESULT_PATH" + aws sts get-caller-identity + # we expect to see all files from loadtest that clusterloader2 outputs here in this dir + ls -larth + aws s3 cp . 
s3://$S3_RESULT_PATH/ --recursive diff --git a/tests/tekton-resources/tasks/setup/eks/awscli-cp-with-vpc.yaml b/tests/tekton-resources/tasks/setup/eks/awscli-cp-with-vpc.yaml index 2751af91..a04c507d 100644 --- a/tests/tekton-resources/tasks/setup/eks/awscli-cp-with-vpc.yaml +++ b/tests/tekton-resources/tasks/setup/eks/awscli-cp-with-vpc.yaml @@ -3,11 +3,11 @@ apiVersion: tekton.dev/v1beta1 kind: Task metadata: name: awscli-eks-cluster-create-with-vpc-stack - namespace: scalability + namespace: scalability spec: description: | Create an EKS cluster. - This Task can be used to create an EKS cluster for a given service role, region in an AWS account + This Task can be used to create an EKS cluster for a given service role, region in an AWS account params: - name: cluster-name description: The name of the EKS cluster you want to spin. @@ -27,6 +27,9 @@ spec: - name: aws-ebs-csi-driver-version default: release-1.13 description: The release version for aws ebs csi driver. + - name: aws-pod-identity-agent-version + default: v1.3.5-eksbuild.2 + description: The release version for aws pod identity agent. 
workspaces: - name: config mountPath: /config/ @@ -53,11 +56,11 @@ spec: echo "subnets=$subnets" sg=$(aws cloudformation --region $(params.region) describe-stacks --stack-name $(params.vpc-stack-name) --query='Stacks[].Outputs[?OutputKey==`SecurityGroups`].OutputValue' --output text) echo "securitygroup=$sg" - + if [ "$CREATED_CLUSTER" == "" ]; then aws eks create-cluster --name $(params.cluster-name) --region $(params.region) --kubernetes-version $(params.kubernetes-version) --role-arn $SERVICE_ROLE_ARN --resources-vpc-config subnetIds=$subnets,securityGroupIds=$sg $ENDPOINT_FLAG fi - aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name) + aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name) - name: write-kubeconfig image: alpine/k8s:1.23.7 script: | @@ -69,8 +72,8 @@ spec: - name: install-addons-and-validate image: alpine/k8s:1.23.7 script: | - # enable PD on the cluster - kubectl api-versions + # enable PD on the cluster + kubectl api-versions kubectl api-resources # Apiserver is not recongnizing deamonset for sometime inconsistently, need to see if livez/readyz are healthy while true; do date && kubectl get --raw "/readyz" --v=10 && break ; sleep 5; done @@ -110,11 +113,10 @@ spec: } }' -n kube-system kubectl scale --replicas 1000 deploy coredns -n kube-system - #ToDo - remove these comments after experimentation - # Install EKS Pod Identity Agent - # ENDPOINT_FLAG="" - # if [ -n "$(params.endpoint)" ]; then - # ENDPOINT_FLAG="--endpoint $(params.endpoint)" - # fi - # aws eks $ENDPOINT_FLAG create-addon --cluster-name $(params.cluster-name) --addon-name eks-pod-identity-agent --addon-version v1.0.0-eksbuild.1 - # aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name) \ No newline at end of file + # Install EKS Pod Identity Agent + ENDPOINT_FLAG="" + if [ -n "$(params.endpoint)" ]; then + ENDPOINT_FLAG="--endpoint 
$(params.endpoint)"
+      fi
+      aws eks $ENDPOINT_FLAG --region $(params.region) create-addon --cluster-name $(params.cluster-name) --addon-name eks-pod-identity-agent --addon-version $(params.aws-pod-identity-agent-version)
+      aws eks $ENDPOINT_FLAG --region $(params.region) wait addon-active --cluster-name $(params.cluster-name) --addon-name eks-pod-identity-agent
diff --git a/tests/tekton-resources/tasks/setup/eks/awscli-pod-identity-association.yaml b/tests/tekton-resources/tasks/setup/eks/awscli-pod-identity-association.yaml
new file mode 100644
index 00000000..a569c702
--- /dev/null
+++ b/tests/tekton-resources/tasks/setup/eks/awscli-pod-identity-association.yaml
@@ -0,0 +1,75 @@
+---
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+  name: awscli-eks-pia-create
+  namespace: scalability
+spec:
+  description: |
+    Create an EKS Pod Identity Association for a given cluster.
+    This Task can be used to create an EKS Pod Identity Association for namespace default and service account default.
+  params:
+  - name: cluster-name
+    description: The name of the EKS cluster you want to create an EKS Pod Identity Association for.
+  - name: region
+    default: "us-west-2"
+    description: The region where the cluster is in.
+  - name: endpoint
+    default: ""
+  - name: namespace-prefix
+    default: "default"
+    description: "The prefix of namespaces for EKS Pod Identity test."
+  - name: namespace-count
+    default: "1"
+    description: "The number of namespaces for EKS Pod Identity test." 
+ - name: pia-trust-policy-url + default: "https://raw.githubusercontent.com/awslabs/kubernetes-iteration-toolkit/main/tests/assets/eks-pod-identity/pia-trust-policy.json" + workspaces: + - name: config + mountPath: /config/ + stepTemplate: + env: + - name: KUBECONFIG + value: /config/kubeconfig + steps: + - name: write-kubeconfig + image: alpine/k8s:1.31.5 + script: | + ENDPOINT_FLAG="" + if [ -n "$(params.endpoint)" ]; then + ENDPOINT_FLAG="--endpoint $(params.endpoint)" + fi + aws eks $ENDPOINT_FLAG update-kubeconfig --name $(params.cluster-name) --region $(params.region) + - name: create-pia + image: alpine/k8s:1.31.5 + script: | + ENDPOINT_FLAG="" + if [ -n "$(params.endpoint)" ]; then + ENDPOINT_FLAG="--endpoint $(params.endpoint)" + fi + + MANAGED_POLICY_ARN="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" + TRUST_POLICY_FILE="pia-trust-policy.json" + # create a trust policy json file + curl -s $(params.pia-trust-policy-url) -o ./$TRUST_POLICY_FILE + for i in $(seq 1 $(params.namespace-count)); do + kubectl create namespace $(params.namespace-prefix)-$i + + PIA_ROLE_NAME=$(params.cluster-name)-pia-role-$i + aws iam create-role --role-name $PIA_ROLE_NAME --assume-role-policy-document file://$TRUST_POLICY_FILE + aws iam attach-role-policy --role-name $PIA_ROLE_NAME --policy-arn $MANAGED_POLICY_ARN + PIA_ROLE_ARN=$(aws iam get-role --role-name $PIA_ROLE_NAME --query 'Role.Arn' --output text) + echo "$PIA_ROLE_ARN is created" + + aws eks $ENDPOINT_FLAG --region $(params.region) create-pod-identity-association \ + --cluster-name $(params.cluster-name) \ + --namespace $(params.namespace-prefix)-$i \ + --service-account default \ + --role-arn $PIA_ROLE_ARN + done + + aws eks $ENDPOINT_FLAG --region $(params.region) list-pod-identity-associations --cluster-name $(params.cluster-name) --query 'associations' + + echo "waiting for 30 seconds..." + sleep 30 + echo "resuming execution..." 
diff --git a/tests/tekton-resources/tasks/teardown/awscli-eks.yaml b/tests/tekton-resources/tasks/teardown/awscli-eks.yaml index f78b17ed..ad6cb1f6 100644 --- a/tests/tekton-resources/tasks/teardown/awscli-eks.yaml +++ b/tests/tekton-resources/tasks/teardown/awscli-eks.yaml @@ -16,6 +16,9 @@ spec: description: The region where the cluster is in. - name: endpoint default: "" + - name: namespace-count + description: The number of namespaces for EKS Pod Identity test. + default: "0" - name: slack-hook default: "" - name: slack-message @@ -43,6 +46,19 @@ spec: echo "Waiting for cluster to be deleted..." aws eks wait cluster-deleted --name $(params.cluster-name) --region $(params.region) $ENDPOINT_FLAG echo "Cluster is deleted..." + + MANAGED_POLICY_ARN="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" + for i in $(seq 1 $(params.namespace-count)); do + PIA_ROLE_NAME=$(params.cluster-name)-pia-role-$i + PIA_ROLE_EXISTS=$(aws iam get-role --role-name $PIA_ROLE_NAME --query 'Role.RoleName' --output text 2>/dev/null) + if [ "$PIA_ROLE_EXISTS" == "$PIA_ROLE_NAME" ]; then + aws iam detach-role-policy --role-name $PIA_ROLE_NAME --policy-arn $MANAGED_POLICY_ARN + aws iam delete-role --role-name $PIA_ROLE_NAME + echo "Role $PIA_ROLE_NAME deleted successfully." + else + echo "Role $PIA_ROLE_NAME does not exist, no action needed." + fi + done - name: teardown-eks-role-stack image: alpine/k8s:1.23.7 script: | @@ -59,7 +75,7 @@ spec: image: alpine/k8s:1.23.7 script: | #!/bin/bash - aws sts get-caller-identity + aws sts get-caller-identity # Check if the stack exists aws cloudformation --region $(params.region) describe-stacks --stack-name $(params.cluster-name) if [ $? -ne 0 ]; then @@ -69,7 +85,7 @@ spec: echo "Deleting stack $(params.cluster-name)..." 
fi #Deletes the CFN stack - aws cloudformation delete-stack --region $(params.region) --stack-name $(params.cluster-name) + aws cloudformation delete-stack --region $(params.region) --stack-name $(params.cluster-name) # Wait for the stack to be deleted aws cloudformation wait stack-delete-complete --region $(params.region) --stack-name $(params.cluster-name) - echo "Stack deleted successfully!" \ No newline at end of file + echo "Stack deleted successfully!"