
Commit 92d7d0b

fix: cleanup code and configurable throughput-pods
1 parent: f416c51

3 files changed: +54 additions, -19 deletions

tests/tekton-resources/pipelines/eks/awscli-eks-cl2-steady-load.yaml

Lines changed: 5 additions & 0 deletions
@@ -15,6 +15,7 @@ spec:
       description: "scheduler throughput threshold"
     - name: cl2-scheduler-steady-qps
       description: "expected PCP tier scheduler qps"
+    - name: cl2-scheduler-throughput-pods
     - name: cl2-scheduler-create-qps
     - name: results-bucket
     - name: slack-hook
@@ -181,12 +182,16 @@ spec:
           value: $(params.cl2-scheduler-steady-qps)
         - name: cl2-scheduler-create-qps
           value: $(params.cl2-scheduler-create-qps)
+        - name: cl2-scheduler-throughput-pods
+          value: $(params.cl2-scheduler-throughput-pods)
         - name: nodes
           value: $(params.desired-nodes)
         - name: results-bucket
           value: $(params.results-bucket)
         - name: cluster-name
           value: $(params.cluster-name)
+        - name: control-plane-tier
+          value: $(params.control-plane-tier)
       runAfter:
         - create-mng-nodes
       taskRef:
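For reference, the two parameters wired through to the task above would be supplied at execution time by a PipelineRun. A minimal, hypothetical sketch follows (the PipelineRun and pipeline names, the parameter values, and the omission of the other required parameters are illustrative assumptions, not part of this commit; "500000" mirrors the old task-level default removed in the next file):

apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  generateName: awscli-eks-cl2-steady-load-   # assumed; taken from the pipeline file name
spec:
  pipelineRef:
    name: awscli-eks-cl2-steady-load          # assumed pipeline name
  params:
    # ...other pipeline parameters omitted for brevity
    - name: cl2-scheduler-throughput-pods
      value: "500000"                         # illustrative; the former task default
    - name: control-plane-tier
      value: "tier-xl"                        # illustrative; maps to 4 iterations in the task script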

tests/tekton-resources/tasks/generators/clusterloader/sustained-scheduler-throughput.yaml

Lines changed: 48 additions & 8 deletions
@@ -13,8 +13,6 @@ spec:
     - name: cl2-branch
       description: "The branch of clusterloader2 you want to use"
       default: "steady-load"
-    - name: cl2-scheduler-throughput-pods
-      default: 500000
     - name: cl2-scheduler-throughput-threshold
       description: "scheduler throughput threshold"
       default: "150"
@@ -33,6 +31,8 @@ spec:
       description: The region where the cluster is in.
     - name: cluster-name
       description: The name of the EKS cluster you want to spin.
+    - name: control-plane-tier
+      default: "standard"
   results:
     - name: datapoint
       description: Stores the CL2 result that can be consumed by other tasks (e.g. cloudwatch)
@@ -83,17 +83,57 @@ spec:
       script: |
         #!/bin/bash

-        TOTAL_CYCLES=1
+        TIER=$(params.control-plane-tier)
+        ITERATIONS=0
+
+        case $TIER in
+          "tier-xl")
+            ITERATIONS=4
+            ;;
+          "tier-2xl")
+            ITERATIONS=8
+            ;;
+          "tier-4xl")
+            ITERATIONS=16
+            ;;
+          *)
+            echo "Unknown tier: $TIER. Defaulting to 1 iteration."
+            ITERATIONS=1
+            ;;
+        esac
+
         cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/scheduler-throughput/config.yaml
         cd $(workspaces.source.path)/perf-tests/clusterloader2/

-        ENABLE_EXEC_SERVICE=false ./clusterloader --kubeconfig=$KUBECONFIG --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/sustained-scheduler-throughput/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2
+        FAIL_COUNT=0
+        for ((i=1; i<=ITERATIONS; i++)); do
+          echo "--- Starting iteration $i of $ITERATIONS ---"
+
+          ENABLE_EXEC_SERVICE=false ./clusterloader --kubeconfig=$KUBECONFIG --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/sustained-scheduler-throughput/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2
+
+          if [ $? -ne 0 ]; then
+            echo "Iteration $i failed."
+            ((FAIL_COUNT++))
+          else
+            echo "Iteration $i passed."
+          fi
+        done
+
+        FAILURE_RATE=$(( (FAIL_COUNT * 100) / ITERATIONS ))
+
+        echo "------------------------------------------"
+        echo "Total Iterations: $ITERATIONS"
+        echo "Total Failures: $FAIL_COUNT"
+        echo "Failure Rate: $FAILURE_RATE%"
+        echo "------------------------------------------"

-        exit_code=$?
-        if [ $exit_code -eq 0 ]; then
-          echo "1" | tee $(results.datapoint.path)
+        exit_code=0
+        if [ "$FAILURE_RATE" -ge 25 ]; then
+          echo "Error: Failure rate ($FAILURE_RATE%) exceeded the 25% threshold."
+          exit_code=1
         else
-          echo "0" | tee $(results.datapoint.path)
+          echo "Success: Failure rate ($FAILURE_RATE%) is within acceptable limits."
+          exit_code=0
         fi
         ls -a $(workspaces.results.path)
         cat $(workspaces.results.path)/SchedulingThroughput*
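A quick check of the failure-rate threshold introduced above: with bash integer division, one failed iteration out of four (the "tier-xl" case) computes to exactly 25% and trips the >= 25 check, while three failures out of sixteen (the "tier-4xl" case) compute to 18% and still pass. A standalone sketch of the same arithmetic, not part of the commit:

#!/bin/bash
# Standalone illustration of the task's integer failure-rate math (values are examples).
ITERATIONS=4       # the "tier-xl" case
FAIL_COUNT=1
FAILURE_RATE=$(( (FAIL_COUNT * 100) / ITERATIONS ))   # (1 * 100) / 4 = 25
if [ "$FAILURE_RATE" -ge 25 ]; then
  echo "fail: ${FAILURE_RATE}% >= 25%"   # one failure out of four already fails the run
else
  echo "pass: ${FAILURE_RATE}% < 25%"
fi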

tests/tekton-resources/tasks/setup/eks/awscli-cp-with-pcp.yaml

Lines changed: 1 addition & 11 deletions
@@ -12,7 +12,7 @@ spec:
     - name: cluster-name
       description: The name of the EKS cluster you want to spin.
     - name: kubernetes-version
-      default: "1.23"
+      default: "1.33"
       description: The EKS version to install.
     - name: region
       default: "us-west-2"
@@ -27,9 +27,6 @@ spec:
     - name: aws-ebs-csi-driver-version
       default: release-1.53
       description: The release version for aws ebs csi driver.
-    - name: aws-pod-identity-agent-version
-      default: v1.3.9-eksbuild.1
-      description: The release version for aws pod identity agent.
     - name: control-plane-tier
       default: "standard"
       description: "Provisioned Control Plane tier, default to standard"
@@ -116,10 +113,3 @@ spec:
          }
        }' -n kube-system
        kubectl scale --replicas 1000 deploy coredns -n kube-system
-        # Install EKS Pod Identity Agent
-        ENDPOINT_FLAG=""
-        if [ -n "$(params.endpoint)" ]; then
-          ENDPOINT_FLAG="--endpoint $(params.endpoint)"
-        fi
-        aws eks $ENDPOINT_FLAG create-addon --cluster-name $(params.cluster-name) --addon-name eks-pod-identity-agent --addon-version $(params.aws-pod-identity-agent-version)
-        aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name)
