|
# Module-wide logging config: INFO level with timestamped records so the
# Bulk API request/response traces captured via caplog are readable.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
37 | 37 |
|
# Base valid payload generator
def base_payload():
    """Return a fresh, minimal valid Bulk API request payload.

    A new dict (with new nested dicts/lists) is built on every call so tests
    can mutate ``time_range`` or add keys without affecting other tests.
    """
    def _empty_filter():
        # Separate instance per include/exclude so they never alias each other.
        return {"namespace": [], "workload": [], "containers": [], "labels": {}}

    return {
        "filter": {
            "exclude": _empty_filter(),
            "include": _empty_filter(),
        },
        "metadata_profile": "cluster-metadata-local-monitoring",
        "measurement_duration": "15mins",
        "time_range": {},
    }
38 | 49 |
|
39 | 50 | @pytest.mark.test_bulk_api_ros |
40 | 51 | @pytest.mark.sanity |
41 | 52 | @pytest.mark.parametrize("bulk_request_payload, expected_job_id_present", [ |
42 | 53 | ({}, True), # Test with an empty payload to check if a job_id is created. |
43 | | - ({ |
44 | | - "filter": { |
45 | | - "exclude": { |
46 | | - "namespace": [], |
47 | | - "workload": [], |
48 | | - "containers": [], |
49 | | - "labels": {} |
50 | | - }, |
51 | | - "include": { |
52 | | - "namespace": [], |
53 | | - "workload": [], |
54 | | - "containers": [], |
55 | | - "labels": {} |
56 | | - } |
57 | | - }, |
58 | | - "time_range": {} |
59 | | - }, True) # Test with a sample payload with some JSON content |
| 54 | + (base_payload(),True) # Test with a sample payload with some JSON content |
60 | 55 | ]) |
61 | 56 | def test_bulk_post_request(cluster_type, bulk_request_payload, expected_job_id_present, caplog): |
62 | 57 | form_kruize_url(cluster_type) |
@@ -104,3 +99,125 @@ def test_bulk_post_request(cluster_type, bulk_request_payload, expected_job_id_p |
104 | 99 | if job_id_present: |
105 | 100 | validate_job_status(response.json()["job_id"], URL, caplog) |
106 | 101 |
|
| 102 | + |
@pytest.mark.test_bulk_api_ros
@pytest.mark.parametrize("start, end, expected_error", [
    ("2025-01-01T12:00:00Z", "2025-01-02T12:00:00Z", "Valid"),  # Valid scenario
    ("", "", "Invalid date format. Must follow ISO 8601 format (YYYY-MM-DDTHH:mm:ss.sssZ) for the jobId:"),  # empty
    ("2024-01-01 10:00:00", "2024-01-01T12:00:00Z", "Invalid date format. Must follow ISO 8601 format (YYYY-MM-DDTHH:mm:ss.sssZ) for the jobId:"),  # bad format
    ("2025-01-02T12:00:00Z", "2025-01-01T12:00:00Z", "Start time should be before end time for the jobId:"),  # start > end
])
def test_bulk_api_time_range_validation(cluster_type, start, end, expected_error, caplog):
    """
    Validates all time-range scenarios for the Bulk API.

    For the "Valid" case the POST must return a string ``job_id`` and the job
    must reach completion; for every negative case the POST must fail with
    ERROR_STATUS_CODE and the response message must contain ``expected_error``.
    """
    form_kruize_url(cluster_type)
    URL = get_kruize_url()

    # Inject the time range under test into an otherwise-valid payload.
    payload = base_payload()
    payload["time_range"]["start"] = start
    payload["time_range"]["end"] = end

    delete_and_create_metric_profile()

    # list and validate default metric profile
    metric_profile_input_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
    # fix: use a context manager — json.load(open(...)) leaked the file handle
    with open(metric_profile_input_json_file) as f:
        json_data = json.load(f)
    metric_profile_name = json_data['metadata']['name']

    response = list_metric_profiles(name=metric_profile_name, logging=False)
    metric_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema)
    assert errorMsg == ""

    delete_and_create_metadata_profile()

    # list and validate default metadata profile
    metadata_profile_input_json_file = metadata_profile_dir / 'bulk_cluster_metadata_local_monitoring.json'
    with open(metadata_profile_input_json_file) as f:
        json_data = json.load(f)
    metadata_profile_name = json_data['metadata']['name']

    response = list_metadata_profiles(name=metadata_profile_name, logging=False)
    metadata_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metadata_profiles_json(metadata_profile_json, list_metadata_profiles_schema)
    assert errorMsg == ""

    if expected_error == "Valid":
        expected_job_id_present = True
        with caplog.at_level(logging.INFO):
            # Log request payload and curl command for POST request
            response = post_bulk_api(payload, logging)

            # Check if job_id is present in the response
            job_id_present = "job_id" in response.json() and isinstance(response.json()["job_id"], str)
            assert job_id_present == expected_job_id_present, f"Expected job_id presence to be {expected_job_id_present} but was {job_id_present}"

            # If a job_id is generated, run the GET request test
            if job_id_present:
                validate_job_status(response.json()["job_id"], URL, caplog)
    else:
        # Negative path: server must reject the payload with the expected message.
        response = post_bulk_api(payload, logging)
        print("Response:", response.json())
        assert response.status_code == ERROR_STATUS_CODE
        assert expected_error in response.json()["message"]
| 168 | + |
| 169 | + |
@pytest.mark.test_bulk_api_ros
def test_bulk_validate_datasource_missing(cluster_type):
    """
    Negative test: a Bulk API POST that references a datasource which is not
    registered with Kruize must be rejected with ERROR_STATUS_CODE and a
    message containing DATASOURCE_NOT_SERVICEABLE.
    """
    # Datasource name that must not exist in the cluster for this test.
    ds_name = "ds-missing-test"
    form_kruize_url(cluster_type)

    delete_and_create_metric_profile()

    # list and validate default metric profile
    metric_profile_input_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
    # fix: use a context manager — json.load(open(...)) leaked the file handle
    with open(metric_profile_input_json_file) as f:
        json_data = json.load(f)
    metric_profile_name = json_data['metadata']['name']

    response = list_metric_profiles(name=metric_profile_name, logging=False)
    metric_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema)
    assert errorMsg == ""

    delete_and_create_metadata_profile()

    # list and validate default metadata profile
    metadata_profile_input_json_file = metadata_profile_dir / 'bulk_cluster_metadata_local_monitoring.json'
    with open(metadata_profile_input_json_file) as f:
        json_data = json.load(f)
    metadata_profile_name = json_data['metadata']['name']

    response = list_metadata_profiles(name=metadata_profile_name, logging=False)
    metadata_profile_json = response.json()

    assert response.status_code == SUCCESS_200_STATUS_CODE

    errorMsg = validate_list_metadata_profiles_json(metadata_profile_json, list_metadata_profiles_schema)
    assert errorMsg == ""

    # verify list does not contain it
    items = list_datasources().json()
    print("Items:", items)
    datasources = items.get("datasources", [])
    assert all(ds.get("name") != ds_name for ds in datasources), \
        f"Datasource with name '{ds_name}' already exists"

    # Build payload referencing the missing datasource
    payload = base_payload()
    payload["datasource"] = ds_name
    payload["time_range"]["start"] = "2025-01-01T00:00:00Z"
    payload["time_range"]["end"] = "2025-01-02T02:00:00Z"

    response = post_bulk_api(payload, logging)
    print("Response:", response.json())
    assert response.status_code == ERROR_STATUS_CODE
    assert DATASOURCE_NOT_SERVICEABLE in response.json()["message"]
0 commit comments