diff --git a/tests/scripts/helpers/kruize.py b/tests/scripts/helpers/kruize.py
index cd8ec719b..c045e42fa 100644
--- a/tests/scripts/helpers/kruize.py
+++ b/tests/scripts/helpers/kruize.py
@@ -126,7 +126,7 @@ def update_results(result_json_file, logging=True):
 
 
 # Description: This function generates recommendation for the given experiment_name , start time and end time .
-def update_recommendations(experiment_name, startTime, endTime):
+def update_recommendations(experiment_name, startTime, endTime, request_id=None):
     print("\n************************************************************")
     print("\nUpdating the recommendation \n for %s for dates Start-time: %s and End-time: %s..." % (
         experiment_name, startTime, endTime))
@@ -137,6 +137,8 @@ def update_recommendations(experiment_name, startTime, endTime):
         queryString = queryString + "&interval_end_time=%s" % (endTime)
     if startTime:
         queryString = queryString + "&interval_start_time=%s" % (startTime)
+    if request_id is not None:
+        queryString = queryString + "&request_id=%s" % (request_id)
 
     url = URL + "/updateRecommendations?%s" % (queryString)
     print("URL = ", url)
diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py
index edfabc6cf..0c1c4377e 100644
--- a/tests/scripts/helpers/utils.py
+++ b/tests/scripts/helpers/utils.py
@@ -16,11 +16,14 @@
 import csv
 import json
 import os
+import random
 import re
+import string
 import subprocess
 import time
 import math
 import docker
+import tempfile
 from helpers.kruize import *
 from datetime import datetime, timedelta
 from kubernetes import client, config
@@ -103,6 +106,7 @@
 MISSING_METADATA_PROFILE_NAME_PARAMETER = "Missing metadata profile 'name' parameter"
 DELETE_METADATA_PROFILE_SUCCESS_MSG = "Metadata profile: %s deleted successfully. View Metadata Profiles at /listMetadataProfiles"
 IMPORT_METADATA_INVALID_METADATA_PROFILE_NAME = "MetadataProfile - %s either does not exist or is not valid"
+INVALID_REQUEST_ID = "Invalid request_id format. Should be a 32-character alphanumeric"
 
 
 # Kruize Recommendations Notification codes
@@ -1973,3 +1977,33 @@ def validate_metadata_workloads(metadata_json, namespace, workload, container):
             f"Validation failed: No entry found for namespace='{namespace}', "
             f"workload='{workload}', and container='{container}'."
         )
+
+
+# Generate request_id
+def generate_request_id(length=32):
+    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+# modify the input json to include request_id and create a temp file with it
+def inject_request_id_and_save(input_file, request_id):
+    with open(input_file, "r") as f:
+        input_json = json.load(f)
+    for experiment in input_json:
+        experiment["request_id"] = request_id
+
+    temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.json')
+    json.dump(input_json, temp_file, indent=2)
+    temp_file.flush()
+    return temp_file.name
+
+
+def get_kruize_pod_logs(namespace="openshift-tuning"):
+    try:
+        pod_name = subprocess.check_output(
+            ["oc", "get", "pod", "-n", namespace, "-l", "app=kruize", "-o", "jsonpath={.items[0].metadata.name}"]
+        ).decode().strip()
+        logs = subprocess.check_output(["oc", "logs", pod_name, "-n", namespace]).decode()
+        return logs
+    except subprocess.CalledProcessError as e:
+        print(f"Error fetching logs: {e}")
+        return ""
+
diff --git a/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md b/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md
index b69ff5cff..b6a7dccd5 100644
--- a/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md
+++ b/tests/scripts/remote_monitoring_tests/Remote_monitoring_tests.md
@@ -30,6 +30,8 @@ Here are the test scenarios:
 - Create container experiment specifying namespaces
 - Create multiple namespace experiments with valid namespace
 - Validate the experiment for the presence of experiment_type, creation_date and update_date along with its default values using listExperiments
+- Create experiment with valid request_id
+- Create experiment with multiple invalid request_ids
 
 ### **Update Results API tests**
 
@@ -44,6 +46,8 @@ Here are the test scenarios:
 - Update results for an invalid experiment or a non-existing experiment
 - Test with invalid values such as blank, null or an invalid value for various keys in the updateResults json
 - Update the same results twice for the same experiment
+- Update results with valid request_id
+- Update results with multiple invalid request_ids
 
 Namespace Related Test Scenarios:
 Sanity Tests
@@ -128,6 +132,8 @@ Here are the test scenarios:
 - Update recommendations with unknown experiment_name
 - Update recommendations with unknown end_time
 - Update recommendations with end_time preceding start_time
+- Update recommendations with valid request_id
+- Update recommendations with multiple invalid request_ids
 
 Namespace Related Test Scenarios:
 Sanity Tests
diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py
index 3a840899a..32419b07c 100644
--- a/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py
+++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_create_experiment.py
@@ -751,3 +751,64 @@ def test_create_exp_exptype_dates_columns(cluster_type):
 
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
+
+
+@pytest.mark.sanity
+def test_create_exp_with_valid_request_id(cluster_type):
+    """
+    Test Description: This test validates the experiment creation with the request_id by passing a
+    valid input json
+    """
+    input_json_file = "../json_files/create_exp.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+    # Generate a random valid request_id
+    request_id = generate_request_id(32)
+    temp_file_path = inject_request_id_and_save(input_json_file, request_id)
+
+    response = create_experiment(temp_file_path)
+    data = response.json()
+
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    # validate the request_id in logs
+    logs = get_kruize_pod_logs()
+    assert f"request_id : {request_id}" in logs, f"request_id {request_id} not found in pod logs"
+
+    response = delete_experiment(temp_file_path)
+    print("delete exp = ", response.status_code)
+    # delete the temp file
+    os.remove(temp_file_path)
+
+
+@pytest.mark.negative
+@pytest.mark.parametrize("invalid_request_id", [
+    "", "abc123", generate_request_id(33), "1234567890abcdef!@#$%^&*()", " " * 32
+])
+def test_create_exp_with_invalid_request_id(cluster_type, invalid_request_id):
+    """
+    Test Description: This test validates the experiment creation by passing the invalid request_ids in the input json
+    """
+    input_json_file = "../json_files/create_exp.json"
+    form_kruize_url(cluster_type)
+
+    # pass the invalid request_id
+    temp_file_path = inject_request_id_and_save(input_json_file, invalid_request_id)
+    response = delete_experiment(temp_file_path)
+    print("delete exp = ", response.status_code)
+
+    response = create_experiment(temp_file_path)
+    data = response.json()
+
+    assert response.status_code == ERROR_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+    assert data['message'] == INVALID_REQUEST_ID
+
+    response = delete_experiment(temp_file_path)
+    print("delete exp = ", response.status_code)
+    # delete the temp file
+    os.remove(temp_file_path)
diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py
index 4191c4890..42b0e85a5 100644
--- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py
+++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_recommendations.py
@@ -663,3 +663,212 @@ def test_update_valid_namespace_recommendations(cluster_type):
         response = delete_experiment(json_file)
         print("delete exp = ", response.status_code)
         assert response.status_code == SUCCESS_STATUS_CODE
+
+
+@pytest.mark.sanity
+def test_update_recommendations_with_valid_request_id(cluster_type):
+    '''
+    Creates Experiment
+
+    update results for 0.5 hrs
+
+    update recommendation experiment_name, interval_end_time as parameters along with a random valid request_id
+    Expected : recommendation should be available for the timestamp provided
+    Expected : plots data should be available
+    '''
+    input_json_file = "../json_files/create_exp.json"
+    result_json_file = "../json_files/update_results.json"
+
+    find = []
+    json_data = json.load(open(input_json_file))
+
+    find.append(json_data[0]['experiment_name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['namespace'])
+
+    form_kruize_url(cluster_type)
+    num_exps = 1
+    num_res = 2
+    # Create experiment using the specified json
+    for i in range(num_exps):
+        create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json"
+        generate_json(find, input_json_file, create_exp_json_file, i)
+
+        # Delete the experiment
+        response = delete_experiment(create_exp_json_file)
+        print("delete exp = ", response.status_code)
+
+        # Create the experiment
+        response = create_experiment(create_exp_json_file)
+
+        data = response.json()
+        print("message = ", data['message'])
+        assert response.status_code == SUCCESS_STATUS_CODE
+        assert data['status'] == SUCCESS_STATUS
+        assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+        # Update results for the experiment
+        update_results_json_file = "/tmp/update_results_" + str(i) + ".json"
+
+        result_json_arr = []
+        # Get the experiment name
+        json_data = json.load(open(create_exp_json_file))
+        experiment_name = json_data[0]['experiment_name']
+        interval_start_time = get_datetime()
+        for j in range(num_res):
+            update_timestamps = True
+            generate_json(find, result_json_file, update_results_json_file, i, update_timestamps)
+            result_json = read_json_data_from_file(update_results_json_file)
+            if j == 0:
+                start_time = interval_start_time
+            else:
+                start_time = end_time
+
+            result_json[0]['interval_start_time'] = start_time
+            end_time = increment_timestamp_by_given_mins(start_time, 15)
+            result_json[0]['interval_end_time'] = end_time
+
+            write_json_data_to_file(update_results_json_file, result_json)
+            result_json_arr.append(result_json[0])
+            response = update_results(update_results_json_file, False)
+
+            data = response.json()
+            print("message = ", data['message'])
+            assert response.status_code == SUCCESS_STATUS_CODE
+            assert data['status'] == SUCCESS_STATUS
+            assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG
+
+        # Expecting that we have recommendations after two results
+        # validate the flow with the request_id param
+        request_id = generate_request_id(32)
+        response = update_recommendations(experiment_name, None, end_time, request_id)
+        data = response.json()
+        assert response.status_code == SUCCESS_STATUS_CODE
+        assert data[0]['experiment_name'] == experiment_name
+        assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['111000'][
+                   'message'] == 'Recommendations Are Available'
+
+        # validate the request_id in logs
+        logs = get_kruize_pod_logs()
+        assert f"request_id : {request_id}" in logs, f"request_id {request_id} not found in pod logs"
+
+        response = list_recommendations(experiment_name, rm=True)
+        if response.status_code == SUCCESS_200_STATUS_CODE:
+            recommendation_json = response.json()
+            recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][
+                "recommendations"]
+            high_level_notifications = recommendation_section["notifications"]
+            # Check if duration
+            assert INFO_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications
+            data_section = recommendation_section["data"]
+            short_term_recommendation = data_section[str(end_time)]["recommendation_terms"]["short_term"]
+            short_term_notifications = short_term_recommendation["notifications"]
+            for notification in short_term_notifications.values():
+                assert notification["type"] != "error"
+
+    # Validate the json against the json schema
+    list_reco_json = response.json()
+    errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema)
+    assert errorMsg == ""
+
+    # Validate the json values
+    create_exp_json = read_json_data_from_file(create_exp_json_file)
+    update_results_json = []
+    update_results_json.append(result_json_arr[len(result_json_arr) - 1])
+
+    expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MIN
+    validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours)
+
+    # Delete all the experiments
+    for i in range(num_exps):
+        json_file = "/tmp/create_exp_" + str(i) + ".json"
+        response = delete_experiment(json_file)
+        print("delete exp = ", response.status_code)
+        assert response.status_code == SUCCESS_STATUS_CODE
+
+
+@pytest.mark.negative
+@pytest.mark.parametrize("invalid_request_id", [
+    "", "abc123", generate_request_id(33), "1234567890abcdef!@#$%^&*()", " " * 32
+])
+def test_update_recommendations_with_invalid_request_id(cluster_type, invalid_request_id):
+    '''
+    Creates Experiment
+
+    update results for 0.5 hrs
+
+    update recommendation experiment_name, interval_end_time as parameters along with multiple invalid request_id
+    Expected : updateRecommendations API should fail with 400 error
+    '''
+    input_json_file = "../json_files/create_exp.json"
+    result_json_file = "../json_files/update_results.json"
+
+    find = []
+    json_data = json.load(open(input_json_file))
+
+    find.append(json_data[0]['experiment_name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['namespace'])
+
+    form_kruize_url(cluster_type)
+    num_exps = 1
+    num_res = 2
+    # Create experiment using the specified json
+    for i in range(num_exps):
+        create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json"
+        generate_json(find, input_json_file, create_exp_json_file, i)
+
+        # Delete the experiment
+        response = delete_experiment(create_exp_json_file)
+        print("delete exp = ", response.status_code)
+
+        # Create the experiment
+        response = create_experiment(create_exp_json_file)
+
+        data = response.json()
+        print("message = ", data['message'])
+        assert response.status_code == SUCCESS_STATUS_CODE
+        assert data['status'] == SUCCESS_STATUS
+        assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+        # Update results for the experiment
+        update_results_json_file = "/tmp/update_results_" + str(i) + ".json"
+
+        result_json_arr = []
+        # Get the experiment name
+        json_data = json.load(open(create_exp_json_file))
+        experiment_name = json_data[0]['experiment_name']
+        interval_start_time = get_datetime()
+        for j in range(num_res):
+            update_timestamps = True
+            generate_json(find, result_json_file, update_results_json_file, i, update_timestamps)
+            result_json = read_json_data_from_file(update_results_json_file)
+            if j == 0:
+                start_time = interval_start_time
+            else:
+                start_time = end_time
+
+            result_json[0]['interval_start_time'] = start_time
+            end_time = increment_timestamp_by_given_mins(start_time, 15)
+            result_json[0]['interval_end_time'] = end_time
+
+            write_json_data_to_file(update_results_json_file, result_json)
+            result_json_arr.append(result_json[0])
+            response = update_results(update_results_json_file, False)
+
+            data = response.json()
+            print("message = ", data['message'])
+            assert response.status_code == SUCCESS_STATUS_CODE
+            assert data['status'] == SUCCESS_STATUS
+            assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG
+
+        # After at least two results, expecting that we have recommendations
+        # validate the flow with the invalid request_id param
+        response = update_recommendations(experiment_name, None, end_time, invalid_request_id)
+        data = response.json()
+        assert response.status_code == ERROR_STATUS_CODE
+        assert data['status'] == ERROR_STATUS
+        assert data['message'] == INVALID_REQUEST_ID
+
+    # Delete all the experiments
+    for i in range(num_exps):
+        json_file = "/tmp/create_exp_" + str(i) + ".json"
+        response = delete_experiment(json_file)
+        print("delete exp = ", response.status_code)
+        assert response.status_code == SUCCESS_STATUS_CODE
diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py
index 1b183b32f..3c2bb88f6 100644
--- a/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py
+++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_update_results.py
@@ -188,7 +188,7 @@ def test_update_results_invalid_namespace_tests(
     print(f"\n*******************************************************")
     print(f"Test - {test_name}")
     print("*******************************************************\n")
-
+
     create_exp_json_file = "../json_files/create_exp_namespace.json"
     form_kruize_url(cluster_type)
 
@@ -201,7 +201,7 @@ def test_update_results_invalid_namespace_tests(
 
     environment = Environment(loader=FileSystemLoader("../json_files/"))
     template = environment.get_template("update_results_template_namespace.json")
-
+
     content = template.render(
         version=version,
         experiment_name=experiment_name,
@@ -255,7 +255,7 @@ def test_update_results_invalid_namespace_tests(
 
     data = response.json()
     assert response.status_code == int(expected_status_code)
-    actual_message = data.get('message')
+    actual_message = data.get('message')
 
     response = delete_experiment(create_exp_json_file)
     print("delete exp = ", response.status_code)
@@ -342,10 +342,10 @@ def test_update_results_with_missing_metrics_section_namespace(test_name, result
         for err in error_data:
             actual_error_message = err["message"]
             assert error_message in actual_error_message
-
+
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
-
+
 
 @pytest.mark.negative
 def test_upload_namespace_results_for_container_experiment(cluster_type):
@@ -370,7 +370,7 @@ def test_upload_namespace_results_for_container_experiment(cluster_type):
     write_json_data_to_file("/tmp/temp_update_results.json", update_res)
 
     form_kruize_url(cluster_type)
-
+
     response = delete_experiment("/tmp/temp_create_exp.json")
     print("delete exp = ", response.status_code)
 
@@ -379,7 +379,7 @@ def test_upload_namespace_results_for_container_experiment(cluster_type):
     data = response.json()
     assert response.status_code == SUCCESS_STATUS_CODE
     assert data['status'] == SUCCESS_STATUS
-
+
     response = update_results("/tmp/temp_update_results.json")
 
     data = response.json()
@@ -414,7 +414,7 @@ def test_upload_container_results_for_namespace_experiment(cluster_type):
     write_json_data_to_file("/tmp/temp_update_results.json", update_res)
 
     form_kruize_url(cluster_type)
-
+
     response = delete_experiment("/tmp/temp_create_exp.json")
     print("delete exp = ", response.status_code)
 
@@ -422,7 +422,7 @@ def test_upload_container_results_for_namespace_experiment(cluster_type):
     data = response.json()
     assert response.status_code == SUCCESS_STATUS_CODE
     assert data['status'] == SUCCESS_STATUS
-
+
     response = update_results("/tmp/temp_update_results.json")
 
     data = response.json()
@@ -441,10 +441,10 @@ def test_upload_bulk_namespace_results_for_container_experiment(cluster_type):
     to a container-based experiment fails for the namespace results.
     """
     input_json_file = "../json_files/create_exp.json"
-    result_json_file = "../json_files/mixed_bulk_results_container.json"
+    result_json_file = "../json_files/mixed_bulk_results_container.json"
 
     form_kruize_url(cluster_type)
-
+
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
 
@@ -452,14 +452,14 @@ def test_upload_bulk_namespace_results_for_container_experiment(cluster_type):
     data = response.json()
     assert response.status_code == SUCCESS_STATUS_CODE
     assert data['status'] == SUCCESS_STATUS
-
+
     response = update_results(result_json_file)
 
     data = response.json()
     assert response.status_code == ERROR_STATUS_CODE
     assert data['status'] == ERROR_STATUS
     assert "failed to save" in data['message']
-
+
     error_found = False
     for result in data.get('data', []):
         if result.get('errors'):
@@ -469,9 +469,9 @@ def test_upload_bulk_namespace_results_for_container_experiment(cluster_type):
                 break
         if error_found:
             break
-
+
     assert error_found, KUBERNETES_OBJECT_NAME_MISMATCH
-
+
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
 
@@ -485,7 +485,7 @@ def test_upload_bulk_container_results_for_namespace_experiment(cluster_type):
     result_json_file = "../json_files/mixed_bulk_results_namespace.json"
 
     form_kruize_url(cluster_type)
-
+
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
 
@@ -493,14 +493,14 @@ def test_upload_bulk_container_results_for_namespace_experiment(cluster_type):
     data = response.json()
     assert response.status_code == SUCCESS_STATUS_CODE
     assert data['status'] == SUCCESS_STATUS
-
+
     response = update_results(result_json_file)
 
     data = response.json()
     assert response.status_code == ERROR_STATUS_CODE
     assert data['status'] == ERROR_STATUS
     assert "failed to save" in data['message']
-
+
     error_found = False
     for result in data.get('data', []):
         if result.get('errors'):
@@ -510,7 +510,7 @@ def test_upload_bulk_container_results_for_namespace_experiment(cluster_type):
             break
         if error_found:
             break
-
+
     assert error_found, MISSING_MANDATORY_PARAMETERS
 
     response = delete_experiment(input_json_file)
@@ -555,16 +555,16 @@ def test_update_results_with_zero_metric_values_fails(cluster_type):
         "namespaceTotalPods_name": "namespaceTotalPods", "namespaceTotalPods_max": 0, "namespaceTotalPods_avg": 0,
         "namespaceRunningPods_name": "namespaceRunningPods", "namespaceRunningPods_max": 0, "namespaceRunningPods_avg": 0
     }
-
+
     content = template.render(**template_vars)
-
+
     with open(tmp_json_file, mode="w", encoding="utf-8") as message:
         message.write(content)
 
     response = update_results(tmp_json_file)
     data = response.json()
     print(f"Update Results Response: {data.get('message')}")
-
+
     assert response.status_code == ERROR_STATUS_CODE
     assert data['status'] == ERROR_STATUS
     assert CANNOT_PROCESS_ALL_ZERO_METRIC_VALUES in data['data'][0]['errors'][0]['message']
@@ -1537,3 +1537,85 @@ def test_update_results__duplicate_records_with_single_exp_multiple_results(clus
 
     # Delete the experiment
     response = delete_experiment(input_json_file)
     print("delete exp = ", response.status_code)
+
+
+@pytest.mark.sanity
+def test_update_results_with_valid_request_id(cluster_type):
+    """
+    Test Description: This test validates update results for a valid experiment with the request_id
+    """
+    creat_exp_json = "../json_files/create_exp.json"
+
+    form_kruize_url(cluster_type)
+    response = delete_experiment(creat_exp_json)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(creat_exp_json)
+
+    data = response.json()
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    # Update results for the experiment
+    result_json_file = "../json_files/update_results.json"
+    # Generate a random valid request_id
+    request_id = generate_request_id(32)
+    temp_file_path = inject_request_id_and_save(result_json_file, request_id)
+
+    response = update_results(temp_file_path)
+
+    data = response.json()
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG
+
+    # Allow logs to flush to validate the request_id in logs
+    time.sleep(2)
+    logs = get_kruize_pod_logs()
+    assert f"request_id : {request_id}" in logs, f"request_id {request_id} not found in pod logs"
+
+    response = delete_experiment(temp_file_path)
+    print("delete exp = ", response.status_code)
+    # delete the temp file
+    os.remove(temp_file_path)
+
+
+@pytest.mark.negative
+@pytest.mark.parametrize("invalid_request_id", [
+    "", "abc123", generate_request_id(33), "1234567890abcdef!@#$%^&*()", " " * 32
+])
+def test_update_results_with_invalid_request_id(cluster_type, invalid_request_id):
+    """
+    Test Description: This test validates the results updation by passing the invalid request_ids in the input json
+    """
+    creat_exp_json = "../json_files/create_exp.json"
+
+    form_kruize_url(cluster_type)
+    response = delete_experiment(creat_exp_json)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(creat_exp_json)
+
+    data = response.json()
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    # Update results for the experiment
+    result_json_file = "../json_files/update_results.json"
+    temp_file_path = inject_request_id_and_save(result_json_file, invalid_request_id)
+
+    response = update_results(temp_file_path)
+    data = response.json()
+    assert response.status_code == ERROR_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+    message = data['data'][0]['errors'][0]['message']
+    assert message == INVALID_REQUEST_ID
+
+    response = delete_experiment(temp_file_path)
+    print("delete exp = ", response.status_code)
+    # delete the temp file
+    os.remove(temp_file_path)