4 changes: 3 additions & 1 deletion tests/scripts/helpers/kruize.py
@@ -126,7 +126,7 @@ def update_results(result_json_file, logging=True):


# Description: This function generates recommendations for the given experiment_name, start time, end time and an optional request_id.
def update_recommendations(experiment_name, startTime, endTime):
def update_recommendations(experiment_name, startTime, endTime, request_id=None):
print("\n************************************************************")
print("\nUpdating the recommendation \n for %s for dates Start-time: %s and End-time: %s..." % (
experiment_name, startTime, endTime))
@@ -137,6 +137,8 @@ def update_recommendations(experiment_name, startTime, endTime):
queryString = queryString + "&interval_end_time=%s" % (endTime)
if startTime:
queryString = queryString + "&interval_start_time=%s" % (startTime)
if request_id is not None:
queryString = queryString + "&request_id=%s" % (request_id)

url = URL + "/updateRecommendations?%s" % (queryString)
print("URL = ", url)
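For reference, a minimal sketch of how the new optional argument could be driven from a test (the cluster type, experiment name, timestamp and request_id below are placeholders; as in the existing tests, form_kruize_url() must be called first so the helper knows the Kruize URL):

from helpers.kruize import form_kruize_url, update_recommendations

# Placeholder values for illustration only.
form_kruize_url("minikube")
experiment_name = "my-sample-experiment"
end_time = "2023-04-14T23:59:20.982Z"
request_id = "a1B2c3D4e5F6g7H8i9J0k1L2m3N4o5P6"  # 32 alphanumeric characters

# With request_id=None (the default) the query string is unchanged;
# passing a value appends "&request_id=<value>" to the /updateRecommendations call.
response = update_recommendations(experiment_name, None, end_time, request_id)
print(response.status_code)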
34 changes: 34 additions & 0 deletions tests/scripts/helpers/utils.py
@@ -16,11 +16,14 @@
import csv
import json
import os
import random
import re
import string
import subprocess
import time
import math
import docker
import tempfile
from helpers.kruize import *
from datetime import datetime, timedelta
from kubernetes import client, config
@@ -103,6 +106,7 @@
MISSING_METADATA_PROFILE_NAME_PARAMETER = "Missing metadata profile 'name' parameter"
DELETE_METADATA_PROFILE_SUCCESS_MSG = "Metadata profile: %s deleted successfully. View Metadata Profiles at /listMetadataProfiles"
IMPORT_METADATA_INVALID_METADATA_PROFILE_NAME = "MetadataProfile - %s either does not exist or is not valid"
INVALID_REQUEST_ID = "Invalid request_id format. Should be a 32-character alphanumeric"


# Kruize Recommendations Notification codes
@@ -1973,3 +1977,33 @@ def validate_metadata_workloads(metadata_json, namespace, workload, container):
f"Validation failed: No entry found for namespace='{namespace}', "
f"workload='{workload}', and container='{container}'."
)


# Generate a random alphanumeric request_id of the given length
def generate_request_id(length=32):
return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

# Modify the input json to include the request_id and write the result to a temp file
def inject_request_id_and_save(input_file, request_id):
with open(input_file, "r") as f:
input_json = json.load(f)
for experiment in input_json:
experiment["request_id"] = request_id

temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.json')
json.dump(input_json, temp_file, indent=2)
temp_file.flush()
return temp_file.name


def get_kruize_pod_logs(namespace="openshift-tuning"):
try:
pod_name = subprocess.check_output(
["oc", "get", "pod", "-n", namespace, "-l", "app=kruize", "-o", "jsonpath={.items[0].metadata.name}"]
).decode().strip()
logs = subprocess.check_output(["oc", "logs", pod_name, "-n", namespace]).decode()
return logs
except subprocess.CalledProcessError as e:
print(f"Error fetching logs: {e}")
return ""
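The INVALID_REQUEST_ID message above implies that a request_id must be exactly 32 alphanumeric characters. A hypothetical client-side check mirroring that rule (not part of this change, shown only to make the expected format explicit):

import re

# Hypothetical helper, not added by this PR: mirrors the format implied by
# INVALID_REQUEST_ID ("32-character alphanumeric").
def looks_like_valid_request_id(request_id):
    return bool(re.fullmatch(r"[A-Za-z0-9]{32}", request_id or ""))

assert looks_like_valid_request_id("a" * 32)                          # 32 alphanumeric characters
assert not looks_like_valid_request_id("abc123")                      # too short
assert not looks_like_valid_request_id("1234567890abcdef!@#$%^&*()")  # special characters
assert not looks_like_valid_request_id(" " * 32)                      # whitespace only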

@@ -30,6 +30,8 @@ Here are the test scenarios:
- Create container experiment specifying namespaces
- Create multiple namespace experiments with valid namespace
- Validate the experiment for the presence of experiment_type, creation_date and update_date along with its default values using listExperiments
- Create experiment with valid request_id
- Create experiment with multiple invalid request_ids

### **Update Results API tests**

@@ -44,6 +46,8 @@ Here are the test scenarios:
- Update results for an invalid experiment or a non-existing experiment
- Test with invalid values such as blank, null or an invalid value for various keys in the updateResults json
- Update the same results twice for the same experiment
- Update results with valid request_id
- Update results with multiple invalid request_ids

Namespace Related Test Scenarios:
Sanity Tests
@@ -128,6 +132,8 @@ Here are the test scenarios:
- Update recommendations with unknown experiment_name
- Update recommendations with unknown end_time
- Update recommendations with end_time preceding start_time
- Update recommendations with valid request_id
- Update recommendations with multiple invalid request_ids

Namespace Related Test Scenarios:
Sanity Tests
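For the request_id scenarios listed above, the new parametrized tests in this change exercise values along these lines (illustrative only; a valid request_id is 32 alphanumeric characters):

# Example request_id values for the scenarios above (illustrative only).
valid_request_id = "0123456789abcdefABCDEF0123456789"   # exactly 32 alphanumeric characters

invalid_request_ids = [
    "",                                # blank
    "abc123",                          # shorter than 32 characters
    "x" * 33,                          # longer than 32 characters
    "1234567890abcdef!@#$%^&*()",      # contains special characters
    " " * 32,                          # whitespace only
]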
@@ -751,3 +751,64 @@ def test_create_exp_exptype_dates_columns(cluster_type):

response = delete_experiment(input_json_file)
print("delete exp = ", response.status_code)


@pytest.mark.sanity
def test_create_exp_with_valid_request_id(cluster_type):
"""
Test Description: This test validates experiment creation when a valid request_id is included in the input json
"""
input_json_file = "../json_files/create_exp.json"
form_kruize_url(cluster_type)

response = delete_experiment(input_json_file)
print("delete exp = ", response.status_code)
# Generate a random valid request_id
request_id = generate_request_id(32)
temp_file_path = inject_request_id_and_save(input_json_file, request_id)

response = create_experiment(temp_file_path)
data = response.json()

assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == CREATE_EXP_SUCCESS_MSG

# validate the request_id in logs
logs = get_kruize_pod_logs()
assert f"request_id : {request_id}" in logs, f"request_id {request_id} not found in pod logs"

response = delete_experiment(temp_file_path)
print("delete exp = ", response.status_code)
# delete the temp file
os.remove(temp_file_path)


@pytest.mark.negative
@pytest.mark.parametrize("invalid_request_id", [
"", "abc123", generate_request_id(33), "1234567890abcdef!@#$%^&*()", " " * 32
])
def test_create_exp_with_invalid_request_id(cluster_type, invalid_request_id):
"""
Test Description: This test validates that experiment creation fails when an invalid request_id is passed in the input json
"""
input_json_file = "../json_files/create_exp.json"
form_kruize_url(cluster_type)

# pass the invalid request_id
temp_file_path = inject_request_id_and_save(input_json_file, invalid_request_id)
response = delete_experiment(temp_file_path)
print("delete exp = ", response.status_code)

response = create_experiment(temp_file_path)
data = response.json()

assert response.status_code == ERROR_STATUS_CODE
assert data['status'] == ERROR_STATUS
assert data['message'] == INVALID_REQUEST_ID

response = delete_experiment(temp_file_path)
print("delete exp = ", response.status_code)
# delete the temp file
os.remove(temp_file_path)
@@ -663,3 +663,212 @@ def test_update_valid_namespace_recommendations(cluster_type):
response = delete_experiment(json_file)
print("delete exp = ", response.status_code)
assert response.status_code == SUCCESS_STATUS_CODE


@pytest.mark.sanity
def test_update_recommendations_with_valid_request_id(cluster_type):
'''
Creates Experiment +
update results for 0.5 hrs +
update recommendations with experiment_name and interval_end_time as parameters, along with a random valid request_id
Expected: recommendations should be available for the timestamp provided
Expected: plots data should be available
'''
input_json_file = "../json_files/create_exp.json"
result_json_file = "../json_files/update_results.json"

find = []
json_data = json.load(open(input_json_file))

find.append(json_data[0]['experiment_name'])
find.append(json_data[0]['kubernetes_objects'][0]['name'])
find.append(json_data[0]['kubernetes_objects'][0]['namespace'])

form_kruize_url(cluster_type)
num_exps = 1
num_res = 2
# Create experiment using the specified json
for i in range(num_exps):
create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json"
generate_json(find, input_json_file, create_exp_json_file, i)

# Delete the experiment
response = delete_experiment(create_exp_json_file)
print("delete exp = ", response.status_code)

# Create the experiment
response = create_experiment(create_exp_json_file)

data = response.json()
print("message = ", data['message'])
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == CREATE_EXP_SUCCESS_MSG

# Update results for the experiment
update_results_json_file = "/tmp/update_results_" + str(i) + ".json"

result_json_arr = []
# Get the experiment name
json_data = json.load(open(create_exp_json_file))
experiment_name = json_data[0]['experiment_name']
interval_start_time = get_datetime()
for j in range(num_res):
update_timestamps = True
generate_json(find, result_json_file, update_results_json_file, i, update_timestamps)
result_json = read_json_data_from_file(update_results_json_file)
if j == 0:
start_time = interval_start_time
else:
start_time = end_time

result_json[0]['interval_start_time'] = start_time
end_time = increment_timestamp_by_given_mins(start_time, 15)
result_json[0]['interval_end_time'] = end_time

write_json_data_to_file(update_results_json_file, result_json)
result_json_arr.append(result_json[0])
response = update_results(update_results_json_file, False)

data = response.json()
print("message = ", data['message'])
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG

# Expecting that we have recommendations after two results
# validate the flow with the request_id param
request_id = generate_request_id(32)
response = update_recommendations(experiment_name, None, end_time, request_id)
data = response.json()
assert response.status_code == SUCCESS_STATUS_CODE
assert data[0]['experiment_name'] == experiment_name
assert data[0]['kubernetes_objects'][0]['containers'][0]['recommendations']['notifications']['111000'][
'message'] == 'Recommendations Are Available'

# validate the request_id in logs
logs = get_kruize_pod_logs()
assert f"request_id : {request_id}" in logs, f"request_id {request_id} not found in pod logs"

response = list_recommendations(experiment_name, rm=True)
if response.status_code == SUCCESS_200_STATUS_CODE:
recommendation_json = response.json()
recommendation_section = recommendation_json[0]["kubernetes_objects"][0]["containers"][0][
"recommendations"]
high_level_notifications = recommendation_section["notifications"]
# Check that the recommendations-available notification is present
assert INFO_RECOMMENDATIONS_AVAILABLE_CODE in high_level_notifications
data_section = recommendation_section["data"]
short_term_recommendation = data_section[str(end_time)]["recommendation_terms"]["short_term"]
short_term_notifications = short_term_recommendation["notifications"]
for notification in short_term_notifications.values():
assert notification["type"] != "error"

# Validate the json against the json schema
list_reco_json = response.json()
errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema)
assert errorMsg == ""

# Validate the json values
create_exp_json = read_json_data_from_file(create_exp_json_file)
update_results_json = []
update_results_json.append(result_json_arr[len(result_json_arr) - 1])

expected_duration_in_hours = SHORT_TERM_DURATION_IN_HRS_MIN
validate_reco_json(create_exp_json[0], update_results_json, list_reco_json[0], expected_duration_in_hours)

# Delete all the experiments
for i in range(num_exps):
json_file = "/tmp/create_exp_" + str(i) + ".json"
response = delete_experiment(json_file)
print("delete exp = ", response.status_code)
assert response.status_code == SUCCESS_STATUS_CODE


@pytest.mark.negative
@pytest.mark.parametrize("invalid_request_id", [
"", "abc123", generate_request_id(33), "1234567890abcdef!@#$%^&*()", " " * 32
])
def test_update_recommendations_with_invalid_request_id(cluster_type, invalid_request_id):
'''
Creates Experiment +
update results for 0.5 hrs +
update recommendations with experiment_name and interval_end_time as parameters, along with an invalid request_id (parametrized over multiple invalid values)
Expected: the updateRecommendations API should fail with a 400 error
'''
input_json_file = "../json_files/create_exp.json"
result_json_file = "../json_files/update_results.json"

find = []
json_data = json.load(open(input_json_file))

find.append(json_data[0]['experiment_name'])
find.append(json_data[0]['kubernetes_objects'][0]['name'])
find.append(json_data[0]['kubernetes_objects'][0]['namespace'])

form_kruize_url(cluster_type)
num_exps = 1
num_res = 2
# Create experiment using the specified json
for i in range(num_exps):
create_exp_json_file = "/tmp/create_exp_" + str(i) + ".json"
generate_json(find, input_json_file, create_exp_json_file, i)

# Delete the experiment
response = delete_experiment(create_exp_json_file)
print("delete exp = ", response.status_code)

# Create the experiment
response = create_experiment(create_exp_json_file)

data = response.json()
print("message = ", data['message'])
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == CREATE_EXP_SUCCESS_MSG

# Update results for the experiment
update_results_json_file = "/tmp/update_results_" + str(i) + ".json"

result_json_arr = []
# Get the experiment name
json_data = json.load(open(create_exp_json_file))
experiment_name = json_data[0]['experiment_name']
interval_start_time = get_datetime()
for j in range(num_res):
update_timestamps = True
generate_json(find, result_json_file, update_results_json_file, i, update_timestamps)
result_json = read_json_data_from_file(update_results_json_file)
if j == 0:
start_time = interval_start_time
else:
start_time = end_time

result_json[0]['interval_start_time'] = start_time
end_time = increment_timestamp_by_given_mins(start_time, 15)
result_json[0]['interval_end_time'] = end_time

write_json_data_to_file(update_results_json_file, result_json)
result_json_arr.append(result_json[0])
response = update_results(update_results_json_file, False)

data = response.json()
print("message = ", data['message'])
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG

# After at least two results, expecting that we have recommendations
# validate the flow with the invalid request_id param
response = update_recommendations(experiment_name, None, end_time, invalid_request_id)
data = response.json()
assert response.status_code == ERROR_STATUS_CODE
assert data['status'] == ERROR_STATUS
assert data['message'] == INVALID_REQUEST_ID

# Delete all the experiments
for i in range(num_exps):
json_file = "/tmp/create_exp_" + str(i) + ".json"
response = delete_experiment(json_file)
print("delete exp = ", response.status_code)
assert response.status_code == SUCCESS_STATUS_CODE