Skip to content

Commit e63d40d

Browse files
authored
Merge pull request #1708 from khansaad/bulk-validations-test
Add Bulk validations test
2 parents 81a6178 + 52603bb commit e63d40d

File tree

3 files changed

+141
-19
lines changed

3 files changed

+141
-19
lines changed

tests/scripts/helpers/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,7 @@
122122
DELETE_PERF_PROFILE_MISSING_NAME_ERROR = "Performance profile name is required."
123123
DELETE_PERF_PROFILE_NON_EXISTENT_NAME_ERROR = "Not Found: performance_profile does not exist: %s"
124124
DELETE_PERF_PROFILE_EXPERIMENT_ASSOCIATION_ERROR = "Performance Profile '%s' cannot be deleted as it is currently associated with %d experiment."
125+
DATASOURCE_NOT_SERVICEABLE = "Datasource is not serviceable."
125126

126127

127128
# Kruize Recommendations Notification codes
@@ -223,7 +224,6 @@
223224

224225
NAMESPACE_EXPERIMENT_TYPE = "namespace"
225226
CONTAINER_EXPERIMENT_TYPE = "container"
226-
PERF_PROFILE_NAME = "resource-optimization-openshift"
227227

228228
# version,experiment_name,cluster_name,performance_profile,mode,target_cluster,type,name,namespace,container_image_name,container_name,measurement_duration,threshold
229229
create_exp_test_data = {
@@ -1898,7 +1898,7 @@ def validate_accelerator_recommendations_for_container(recommendations_json):
18981898
def validate_job_status(job_id, base_url, caplog):
18991899
# Common keys expected in both responses
19001900
common_keys = {
1901-
"status", "total_experiments", "processed_experiments", "job_id", "job_start_time", "job_end_time"
1901+
"status", "total_experiments", "processed_experiments", "job_id", "job_start_time"
19021902
}
19031903

19041904
# Extra keys expected when verbose=true

tests/scripts/local_monitoring_tests/Local_monitoring_tests.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,11 @@ Here are the test scenarios:
152152
- Sample JSON Payload: Verifies the API correctly processes a structured payload and generates a job_id.
153153
- Verify the response of the GET job status API for the generated job_id.
154154
- Tests both verbose=false and verbose=true GET requests for comprehensive verification.
155+
- Validate bulk API response by passing a valid and multiple invalid time range values.
156+
- A job_id is generated in the valid scenario
157+
- A corresponding error message is returned for each invalid scenario, with response code 400.
158+
- Validate bulk API response by passing an invalid datasource name in the input JSON.
159+
- An error message is returned with response code 400
155160

156161
## Prerequisites for running the tests:
157162
- Minikube setup or access to Openshift cluster

tests/scripts/local_monitoring_tests/rest_apis/test_bulkAPI.py

Lines changed: 134 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -35,28 +35,23 @@
3535
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
3636
logger = logging.getLogger(__name__)
3737

38+
# Base valid payload generator
def base_payload():
    """Return a fresh, minimal valid Bulk API request payload.

    Each call builds new dict/list objects so callers may mutate the
    returned payload (e.g. fill in ``time_range``) without affecting
    other tests.
    """
    def _empty_filter():
        # Fresh dict per call so exclude/include never share state.
        return {"namespace": [], "workload": [], "containers": [], "labels": {}}

    return {
        "filter": {
            "exclude": _empty_filter(),
            "include": _empty_filter(),
        },
        "metadata_profile": "cluster-metadata-local-monitoring",
        "measurement_duration": "15mins",
        "time_range": {},
    }
3849

3950
@pytest.mark.test_bulk_api_ros
4051
@pytest.mark.sanity
4152
@pytest.mark.parametrize("bulk_request_payload, expected_job_id_present", [
4253
({}, True), # Test with an empty payload to check if a job_id is created.
43-
({
44-
"filter": {
45-
"exclude": {
46-
"namespace": [],
47-
"workload": [],
48-
"containers": [],
49-
"labels": {}
50-
},
51-
"include": {
52-
"namespace": [],
53-
"workload": [],
54-
"containers": [],
55-
"labels": {}
56-
}
57-
},
58-
"time_range": {}
59-
}, True) # Test with a sample payload with some JSON content
54+
(base_payload(),True) # Test with a sample payload with some JSON content
6055
])
6156
def test_bulk_post_request(cluster_type, bulk_request_payload, expected_job_id_present, caplog):
6257
form_kruize_url(cluster_type)
@@ -104,3 +99,125 @@ def test_bulk_post_request(cluster_type, bulk_request_payload, expected_job_id_p
10499
if job_id_present:
105100
validate_job_status(response.json()["job_id"], URL, caplog)
106101

102+
103+
@pytest.mark.test_bulk_api_ros
@pytest.mark.parametrize("start, end, expected_error", [
    ("2025-01-01T12:00:00Z", "2025-01-02T12:00:00Z", "Valid"),  # Valid scenario
    ("", "", "Invalid date format. Must follow ISO 8601 format (YYYY-MM-DDTHH:mm:ss.sssZ) for the jobId:"),  # empty
    ("2024-01-01 10:00:00", "2024-01-01T12:00:00Z", "Invalid date format. Must follow ISO 8601 format (YYYY-MM-DDTHH:mm:ss.sssZ) for the jobId:"),  # bad format
    ("2025-01-02T12:00:00Z", "2025-01-01T12:00:00Z", "Start time should be before end time for the jobId:"),  # start > end
])
def test_bulk_api_time_range_validation(cluster_type, start, end, expected_error, caplog):
    """
    Validates positive and negative time-range scenarios for the Bulk API.

    The "Valid" case must produce a job_id whose status is then polled via
    validate_job_status; every invalid case must be rejected with
    ERROR_STATUS_CODE and a message containing `expected_error`.
    """
    form_kruize_url(cluster_type)
    URL = get_kruize_url()
    payload = base_payload()

    payload["time_range"]["start"] = start
    payload["time_range"]["end"] = end

    delete_and_create_metric_profile()

    # list and validate default metric profile
    metric_profile_input_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
    # Fixed: json.load(open(...)) leaked the file handle; use a context manager.
    with open(metric_profile_input_json_file) as f:
        json_data = json.load(f)
    metric_profile_name = json_data['metadata']['name']

    response = list_metric_profiles(name=metric_profile_name, logging=False)
    metric_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema)
    assert errorMsg == ""

    delete_and_create_metadata_profile()

    # list and validate default metadata profile
    metadata_profile_input_json_file = metadata_profile_dir / 'bulk_cluster_metadata_local_monitoring.json'
    with open(metadata_profile_input_json_file) as f:
        json_data = json.load(f)
    metadata_profile_name = json_data['metadata']['name']

    response = list_metadata_profiles(name=metadata_profile_name, logging=False)
    metadata_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metadata_profiles_json(metadata_profile_json, list_metadata_profiles_schema)
    assert errorMsg == ""

    if expected_error == "Valid":
        expected_job_id_present = True
        with caplog.at_level(logging.INFO):
            # Log request payload and curl command for POST request
            response = post_bulk_api(payload, logging)

            # Check if job_id is present in the response
            job_id_present = "job_id" in response.json() and isinstance(response.json()["job_id"], str)
            assert job_id_present == expected_job_id_present, f"Expected job_id presence to be {expected_job_id_present} but was {job_id_present}"

            # If a job_id is generated, run the GET request test
            if job_id_present:
                validate_job_status(response.json()["job_id"], URL, caplog)
    else:
        response = post_bulk_api(payload, logging)
        print("Response:", response.json())
        assert response.status_code == ERROR_STATUS_CODE
        assert expected_error in response.json()["message"]
168+
169+
170+
@pytest.mark.test_bulk_api_ros
def test_bulk_validate_datasource_missing(cluster_type):
    """
    Validates the Bulk API response when the payload references a datasource
    that is not registered.

    After confirming the datasource name is absent from the datasource list,
    a POST with that name must be rejected with ERROR_STATUS_CODE and a
    message containing DATASOURCE_NOT_SERVICEABLE.
    """
    # Fixed: removed unused locals `job_id` and `URL` from the original.
    ds_name = "ds-missing-test"
    form_kruize_url(cluster_type)

    delete_and_create_metric_profile()

    # list and validate default metric profile
    metric_profile_input_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
    # Fixed: json.load(open(...)) leaked the file handle; use a context manager.
    with open(metric_profile_input_json_file) as f:
        json_data = json.load(f)
    metric_profile_name = json_data['metadata']['name']

    response = list_metric_profiles(name=metric_profile_name, logging=False)
    metric_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema)
    assert errorMsg == ""

    delete_and_create_metadata_profile()

    # list and validate default metadata profile
    metadata_profile_input_json_file = metadata_profile_dir / 'bulk_cluster_metadata_local_monitoring.json'
    with open(metadata_profile_input_json_file) as f:
        json_data = json.load(f)
    metadata_profile_name = json_data['metadata']['name']

    response = list_metadata_profiles(name=metadata_profile_name, logging=False)
    metadata_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metadata_profiles_json(metadata_profile_json, list_metadata_profiles_schema)
    assert errorMsg == ""

    # verify list does not contain it
    items = list_datasources().json()
    print("Items:", items)
    datasources = items.get("datasources", [])
    assert all(ds.get("name") != ds_name for ds in datasources), \
        f"Datasource with name '{ds_name}' already exists"

    # Build payload referencing the missing datasource
    payload = base_payload()
    payload["datasource"] = ds_name
    payload["time_range"]["start"] = "2025-01-01T00:00:00Z"
    payload["time_range"]["end"] = "2025-01-02T02:00:00Z"

    response = post_bulk_api(payload, logging)
    print("Response:", response.json())
    assert response.status_code == ERROR_STATUS_CODE
    assert DATASOURCE_NOT_SERVICEABLE in response.json()["message"]

0 commit comments

Comments
 (0)