@@ -24,6 +24,10 @@ inputs:
     description: 'Platform architecture (amd64, arm64)'
     required: false
     default: 'amd64'
+  dry_run:
+    description: 'Run pytest in dry-run mode (collect tests only, do not execute)'
+    required: false
+    default: 'false'


 runs:
@@ -54,31 +58,50 @@ runs:
         # Run pytest with detailed output and JUnit XML
         set +e  # Don't exit on test failures

-        # Detect GPU availability and conditionally add GPU flags
-        GPU_FLAGS=""
-        if command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
-          echo "GPU detected, enabling GPU runtime"
-          GPU_FLAGS="--runtime=nvidia --gpus all"
+        # Determine docker runtime flags and pytest command based on dry_run mode
+        if [[ "${{ inputs.dry_run }}" == "true" ]]; then
+          echo "🔍 Running pytest in dry-run mode (collect-only, no GPU required)"
+          GPU_FLAGS=""
+          PYTEST_CMD="pytest -v --collect-only -m \"${{ inputs.pytest_marks }}\""
         else
-          echo "No GPU detected, running in CPU-only mode"
+          echo "🚀 Running pytest in normal mode"
+          PYTEST_CMD="pytest -v --tb=short --basetemp=/tmp -o cache_dir=/tmp/.pytest_cache --junitxml=/workspace/test-results/${{ env.PYTEST_XML_FILE }} --durations=10 -m \"${{ inputs.pytest_marks }}\""
+
+          # Detect GPU availability and conditionally add GPU flags
+          GPU_FLAGS=""
+          if command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
+            echo "✓ GPU detected, enabling GPU runtime"
+            GPU_FLAGS="--runtime=nvidia --gpus all"
+          else
+            echo "⚠️ No GPU detected, running in CPU-only mode"
+          fi
         fi

+        # Get absolute path for test-results directory and ensure it has proper permissions
+        TEST_RESULTS_DIR="$(pwd)/test-results"
+        chmod 777 "${TEST_RESULTS_DIR}"
+        echo "📁 Test results will be saved to: ${TEST_RESULTS_DIR}"
+
         docker run ${GPU_FLAGS} --rm -w /workspace \
           --cpus=${NUM_CPUS} \
           --network host \
           --name ${{ env.CONTAINER_ID }}_pytest \
+          -v "${TEST_RESULTS_DIR}:/workspace/test-results" \
           ${{ inputs.image_tag }} \
-          bash -c "mkdir -p /workspace/test-results && pytest -v --tb=short --basetemp=/tmp -o cache_dir=/tmp/.pytest_cache --junitxml=/workspace/test-results/${{ env.PYTEST_XML_FILE }} --durations=10 -m \"${{ inputs.pytest_marks }}\""
+          bash -c "${PYTEST_CMD}"

         TEST_EXIT_CODE=$?
         echo "TEST_EXIT_CODE=${TEST_EXIT_CODE}" >> $GITHUB_ENV
         echo "🧪 Tests completed with exit code: ${TEST_EXIT_CODE}"

-        # Copy test results from container to host
-        docker cp ${{ env.CONTAINER_ID }}_pytest:/workspace/test-results . || echo "Failed to copy test results"
-
-        # Clean up container
-        docker rm -f ${{ env.CONTAINER_ID }}_pytest || echo "Failed to clean up container"
+        # Verify test results were written (only in normal mode)
+        if [[ "${{ inputs.dry_run }}" != "true" ]]; then
+          if [[ -f "${TEST_RESULTS_DIR}/${{ env.PYTEST_XML_FILE }}" ]]; then
+            echo "✅ Test results file found: ${TEST_RESULTS_DIR}/${{ env.PYTEST_XML_FILE }}"
+          else
+            echo "⚠️ Test results file not found: ${TEST_RESULTS_DIR}/${{ env.PYTEST_XML_FILE }}"
+          fi
+        fi

         # Always continue to results processing
         exit 0
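For orientation, the dry-run branch above reduces the container invocation to a collect-only pytest call with no GPU flags. The lines below are an illustrative local sketch of that path, not part of the action; the image tag and the marker expression are placeholders standing in for inputs.image_tag and inputs.pytest_marks.

# Illustrative reproduction of the dry-run path (placeholder image tag and marker).
# GPU flags are omitted and pytest only collects matching tests; nothing is executed.
docker run --rm -w /workspace \
  --network host \
  -v "$(pwd)/test-results:/workspace/test-results" \
  my-registry/my-image:latest \
  bash -c 'pytest -v --collect-only -m "unit"'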
@@ -103,23 +126,9 @@ runs:
           ERROR_TESTS=$(grep -o 'errors="[0-9]*"' "$JUNIT_FILE" | grep -o '[0-9]*' | head -1 || echo "0")
           echo "📊 ${TOTAL_TESTS} tests completed (${FAILED_TESTS} failed, ${ERROR_TESTS} errors)"

-          # Create uniquely named metadata file with step context information
-          # Use framework-testtype-arch to make it unique per test run
-          METADATA_FILE="test-results/test_metadata_${{ inputs.framework }}_${STR_TEST_TYPE}_${{ inputs.platform_arch }}.json"
-          JUNIT_NAME="pytest_test_report_${{ inputs.framework }}_${STR_TEST_TYPE}_${{ inputs.platform_arch }}.xml"
-
           # Rename XML file to unique name
+          JUNIT_NAME="pytest_test_report_${{ inputs.framework }}_${STR_TEST_TYPE}_${{ inputs.platform_arch }}_${{ github.run_id }}_${{ job.check_run_id }}.xml"
           mv "$JUNIT_FILE" "test-results/$JUNIT_NAME"
-
-          echo '{' > "$METADATA_FILE"
-          echo '  "job_name": "${{ github.job }}",' >> "$METADATA_FILE"
-          echo '  "framework": "${{ inputs.framework }}",' >> "$METADATA_FILE"
-          echo '  "test_type": "${{ inputs.test_type }}",' >> "$METADATA_FILE"
-          echo '  "platform_arch": "${{ inputs.platform_arch }}",' >> "$METADATA_FILE"
-          echo '  "junit_xml_file": "'"$JUNIT_NAME"'",' >> "$METADATA_FILE"
-          echo '  "step_name": "Run ${{ inputs.test_type }} tests"' >> "$METADATA_FILE"
-          echo '}' >> "$METADATA_FILE"
-          echo "📝 Created test metadata file: $METADATA_FILE"
           echo "📝 Renamed XML file to: $JUNIT_NAME"
         else
           echo "⚠️ JUnit XML file not found - test results may not be available for upload"
@@ -135,8 +144,6 @@ runs:
       uses: actions/upload-artifact@v4
       if: always()  # Always upload test results, even if tests failed
       with:
-        name: test-results-${{ inputs.framework }}-${{ env.STR_TEST_TYPE }}-${{ env.PLATFORM_ARCH }}
-        path: |
-          test-results/pytest_test_report_${{ inputs.framework }}_${{ env.STR_TEST_TYPE }}_${{ inputs.platform_arch }}.xml
-          test-results/test_metadata_${{ inputs.framework }}_${{ env.STR_TEST_TYPE }}_${{ inputs.platform_arch }}.json
+        name: test-results-${{ inputs.framework }}-${{ env.STR_TEST_TYPE }}-${{ env.PLATFORM_ARCH }}-${{ github.run_id }}-${{ job.check_run_id }}
+        path: test-results/pytest_test_report_${{ inputs.framework }}_${{ env.STR_TEST_TYPE }}_${{ inputs.platform_arch }}_${{ github.run_id }}_${{ job.check_run_id }}.xml
         retention-days: 7
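With the new input wired through, a caller can opt into collection-only runs. Below is a minimal sketch of a calling workflow step under stated assumptions: the action path is hypothetical, the input values are placeholders, and any other required inputs are omitted; only the input names come from the diff above.

- name: Collect tests without executing them
  uses: ./.github/actions/run-pytest   # hypothetical path to this composite action
  with:
    image_tag: my-registry/my-image:latest   # placeholder
    framework: pytorch                       # placeholder
    test_type: unit                          # placeholder
    platform_arch: amd64
    pytest_marks: "unit"                     # placeholder marker expression
    dry_run: 'true'                          # collect tests only, do not execute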