11import os .path
22import pathlib
33
4+ import pytest
5+
46from src .json_repair import repair_json
57
68path = pathlib .Path (__file__ ).parent .resolve ()
9+ CI = os .getenv ("CI" ) is not None
710
811with open (os .path .join (path , "valid.json" )) as fd :
912 correct_json = fd .read ()
1215 incorrect_json = fd .read ()
1316
1417
18+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
1519def test_true_true_correct (benchmark ):
1620 benchmark (repair_json , correct_json , return_objects = True , skip_json_loads = True )
1721
@@ -25,6 +29,7 @@ def test_true_true_correct(benchmark):
2529 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
2630
2731
32+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
2833def test_true_true_incorrect (benchmark ):
2934 benchmark (repair_json , incorrect_json , return_objects = True , skip_json_loads = True )
3035
@@ -38,6 +43,7 @@ def test_true_true_incorrect(benchmark):
3843 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
3944
4045
46+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
4147def test_true_false_correct (benchmark ):
4248 benchmark (repair_json , correct_json , return_objects = True , skip_json_loads = False )
4349 # Retrieve the median execution time
@@ -50,6 +56,7 @@ def test_true_false_correct(benchmark):
5056 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
5157
5258
59+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
5360def test_true_false_incorrect (benchmark ):
5461 benchmark (repair_json , incorrect_json , return_objects = True , skip_json_loads = False )
5562 # Retrieve the median execution time
@@ -62,6 +69,7 @@ def test_true_false_incorrect(benchmark):
6269 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
6370
6471
72+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
6573def test_false_true_correct (benchmark ):
6674 benchmark (repair_json , correct_json , return_objects = False , skip_json_loads = True )
6775 # Retrieve the median execution time
@@ -74,6 +82,7 @@ def test_false_true_correct(benchmark):
7482 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
7583
7684
85+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
7786def test_false_true_incorrect (benchmark ):
7887 benchmark (repair_json , incorrect_json , return_objects = False , skip_json_loads = True )
7988 # Retrieve the median execution time
@@ -86,6 +95,7 @@ def test_false_true_incorrect(benchmark):
8695 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
8796
8897
98+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
8999def test_false_false_correct (benchmark ):
90100 benchmark (repair_json , correct_json , return_objects = False , skip_json_loads = False )
91101 # Retrieve the median execution time
@@ -98,6 +108,7 @@ def test_false_false_correct(benchmark):
98108 assert mean_time < max_time , f"Benchmark exceeded threshold: { mean_time :.3f} s > { max_time :.3f} s"
99109
100110
111+ @pytest .mark .skipif (CI , reason = "Performance tests are skipped in CI" )
101112def test_false_false_incorrect (benchmark ):
102113 benchmark (repair_json , incorrect_json , return_objects = False , skip_json_loads = False )
103114 # Retrieve the median execution time