|
| 1 | +""" |
| 2 | +Copyright (c) 2025 IBM Corporation and others. |
| 3 | +
|
| 4 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +you may not use this file except in compliance with the License. |
| 6 | +You may obtain a copy of the License at |
| 7 | +
|
| 8 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +
|
| 10 | +Unless required by applicable law or agreed to in writing, software |
| 11 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +See the License for the specific language governing permissions and |
| 14 | +limitations under the License. |
| 15 | +""" |
| 16 | +import tempfile |
| 17 | + |
| 18 | +import pytest |
| 19 | +import sys |
| 20 | + |
| 21 | + |
| 22 | +from helpers.list_metric_profiles_validate import * |
| 23 | +sys.path.append("../../") |
| 24 | + |
| 25 | +from helpers.fixtures import * |
| 26 | +from helpers.utils import * |
| 27 | + |
# Directory containing the metric/performance profile JSON fixtures.
perf_profile_dir = get_metric_profile_dir()

# Mandatory-field scenarios for createPerformanceProfile: removing any one of
# these fields is expected to produce the same error status code and status.
_MANDATORY_FIELD_NAMES = (
    "name",
    "profile_version",
    "sloInfo",
    "direction",
    "objective_function",
    "function_type",
    "function_variables",
    "metric_name",
    "datasource",
    "value_type",
    "aggregation_functions",
    "function",
    "query",
)
mandatory_fields = [
    (field, ERROR_STATUS_CODE, ERROR_STATUS) for field in _MANDATORY_FIELD_NAMES
]
| 44 | + |
@pytest.mark.perf_profile
def test_create_performance_profile(cluster_type):
    """
    Test Description: This test validates the response status code of createPerformanceProfile API by passing a
    valid input for the json
    """
    # Build the kruize URL for the target cluster type.
    form_kruize_url(cluster_type)
    profile_json = perf_profile_dir / 'resource_optimization_openshift.json'

    # Remove any pre-existing profile so the create below starts from a clean slate.
    delete_response = delete_performance_profile(profile_json)
    print("delete API status code = ", delete_response.status_code)
    print("delete API status message = ", delete_response.json()["message"])

    # Create the performance profile from the sample json.
    create_response = create_performance_profile(profile_json)
    create_data = create_response.json()
    print(create_data['message'])

    # Pull the expected name/version straight from the input json.
    with open(profile_json, "r") as fp:
        input_json = json.load(fp)
    expected_name = input_json["name"]
    expected_version = input_json["profile_version"]

    assert create_response.status_code == SUCCESS_STATUS_CODE
    assert create_data['status'] == SUCCESS_STATUS
    assert CREATE_PERF_PROFILE_SUCCESS_MSG % expected_name in create_data['message']

    # Cross-check via listPerformanceProfiles that the stored version matches.
    list_response = list_performance_profiles()
    assert list_response.json()[0]["profile_version"] == expected_version

    # Validate the list response against the metric-profile json schema.
    errorMsg = validate_list_metric_profiles_json(list_response.json(), list_metric_profiles_schema)
    assert errorMsg == ""

    # Clean up the profile created by this test.
    cleanup_response = delete_performance_profile(profile_json)
    print("delete performance profile = ", cleanup_response.status_code)
| 87 | + |
@pytest.mark.perf_profile
def test_create_performance_profile_with_duplicate_data(cluster_type):
    """
    Test Description: This test validates the response message of createPerformanceProfile API by passing the same data twice
    """
    # Build the kruize URL for the target cluster type.
    form_kruize_url(cluster_type)
    profile_json = perf_profile_dir / 'resource_optimization_openshift.json'

    # Remove any pre-existing profile so the first create succeeds.
    delete_response = delete_performance_profile(profile_json)
    print("delete API status code = ", delete_response.status_code)
    print("delete API status message = ", delete_response.json()["message"])

    # First create: expected to succeed.
    first_response = create_performance_profile(profile_json)
    first_data = first_response.json()
    print(first_data['message'])

    # Pull the expected name/version straight from the input json.
    with open(profile_json, "r") as fp:
        input_json = json.load(fp)
    expected_name = input_json["name"]
    expected_version = input_json["profile_version"]

    assert first_response.status_code == SUCCESS_STATUS_CODE
    assert first_data['status'] == SUCCESS_STATUS
    assert CREATE_PERF_PROFILE_SUCCESS_MSG % expected_name in first_data['message']

    # Confirm via listPerformanceProfiles that the profile was stored.
    list_response = list_performance_profiles()
    assert list_response.json()[0]["profile_version"] == expected_version

    # Second create with identical data: expected to be rejected as a duplicate.
    second_response = create_performance_profile(profile_json)
    second_data = second_response.json()
    print(second_data['message'])

    assert second_response.status_code == ERROR_409_STATUS_CODE
    assert second_data['status'] == ERROR_STATUS
    assert second_data['message'] == CREATE_PERF_PROFILE_DUPLICATE_RECORD_MSG % expected_name

    # Clean up the profile created by this test.
    cleanup_response = delete_performance_profile(profile_json)
    print("delete performance profile = ", cleanup_response.status_code)
| 133 | + |
# Maps each parametrized mandatory field to:
#   (path of keys/indexes leading to the container that holds the field,
#    key to delete from that container,
#    field name expected in the API error message — some fields are
#    reported back by the server in camelCase).
_MANDATORY_FIELD_SPEC = {
    "name": ((), "name", "name"),
    "profile_version": ((), "profile_version", "profile_version"),
    "sloInfo": ((), "slo", "sloInfo"),
    "direction": (("slo",), "direction", "direction"),
    "objective_function": (("slo",), "objective_function", "objectiveFunction"),
    "function_type": (("slo", "objective_function"), "function_type", "function_type"),
    "function_variables": (("slo",), "function_variables", "functionVariables"),
    "metric_name": (("slo", "function_variables", 0), "name", "name"),
    "datasource": (("slo", "function_variables", 0), "datasource", "datasource"),
    "value_type": (("slo", "function_variables", 0), "value_type", "valueType"),
    "aggregation_functions": (("slo", "function_variables", 0), "aggregation_functions", "aggregation_functions"),
    "function": (("slo", "function_variables", 0, "aggregation_functions", 0), "function", "function"),
    "query": (("slo", "function_variables", 0, "aggregation_functions", 0), "query", "query"),
}


def _remove_mandatory_field(json_data, field):
    """Delete the mandatory field under test from json_data in place.

    Returns the field name expected to appear in the API error message
    (differs from `field` where the server reports a camelCase name).
    """
    path, key, message_field = _MANDATORY_FIELD_SPEC[field]
    container = json_data
    for step in path:
        container = container[step]
    container.pop(key, None)
    return message_field


@pytest.mark.perf_profile
@pytest.mark.parametrize("field, expected_status_code, expected_status", mandatory_fields)
def test_create_performance_profiles_mandatory_fields(cluster_type, field, expected_status_code, expected_status):
    """
    Test Description: This test validates error response of createPerformanceProfile API when mandatory fields are missing.
    """

    # Form the kruize url
    form_kruize_url(cluster_type)
    input_json_file = perf_profile_dir / 'resource_optimization_openshift.json'
    # Delete any existing profile so the create below is judged on the payload alone
    response = delete_performance_profile(input_json_file)
    print("delete API status code = ", response.status_code)
    data = response.json()
    print("delete API status message = ", data["message"])

    # Load the valid profile and strip out the mandatory field under test.
    # (with-open fixes the file-handle leak of the previous json.load(open(...)))
    with open(input_json_file, "r") as f:
        json_data = json.load(f)
    field = _remove_mandatory_field(json_data, field)

    print("\n*****************************************")
    print(json_data)
    print("*****************************************\n")

    # Write the modified payload to a unique temp file: tempfile avoids the
    # previous hard-coded /tmp path colliding across parallel test runs
    # (and finally uses the module's `tempfile` import).
    with tempfile.NamedTemporaryFile(mode="w", prefix="create_performance_profile_",
                                     suffix=".json", delete=False) as tmp_file:
        tmp_file.write(json.dumps(json_data))
        json_file = tmp_file.name

    # Create performance profile using the modified json
    response = create_performance_profile(json_file)
    data = response.json()
    print(data['message'])

    assert response.status_code == expected_status_code, \
        f"Mandatory field check failed for {field} actual - {response.status_code} expected - {expected_status_code}"
    assert data['status'] == expected_status

    # aggregation_functions has a dedicated error message; every other field
    # shares the generic "missing mandatory parameter" message.
    if field == "aggregation_functions":
        assert data['message'] == AGGR_FUNC_MISSING_MANDATORY_PARAMETERS_MSG
    else:
        assert data['message'] == CREATE_METRIC_PROFILE_MISSING_MANDATORY_PARAMETERS_MSG % field
0 commit comments