diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 3613c38b10..e81371d8c2 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -23,15 +23,15 @@ jobs: sudo apt install -y cmake ninja-build graphviz graphviz git clone https://github.com/doxygen/doxygen.git ../doxygen cd ../doxygen - git checkout 26b5403 + git checkout Release_1_16_1 cd - cmake -S ../doxygen -B ../doxygen/build -G Ninja sudo ninja -C ../doxygen/build install - name: Build Documentation run: | - pip3 install fypp - cmake -S . -B build -G Ninja --install-prefix=$(pwd)/build/install -D MFC_DOCUMENTATION=ON + pip3 install fypp rich PyYAML + cmake -S . -B build -G Ninja --install-prefix="$(pwd)/build/install" -D MFC_DOCUMENTATION=ON ninja -C build install - name: Upload Built Documentation Artifact diff --git a/.github/workflows/frontier/submit-bench.sh b/.github/workflows/frontier/submit-bench.sh index 4374231eca..fba8249df2 100644 --- a/.github/workflows/frontier/submit-bench.sh +++ b/.github/workflows/frontier/submit-bench.sh @@ -32,7 +32,7 @@ sbatch <), this macro sets up a @@ -706,9 +728,13 @@ if (MFC_DOCUMENTATION) set(opt_example_dependency "") set(opt_constraints_dependency "") + set(opt_cli_reference_dependency "") + set(opt_parameters_dependency "") if (${target} STREQUAL documentation) set(opt_example_dependency "${CMAKE_CURRENT_SOURCE_DIR}/docs/documentation/examples.md") set(opt_constraints_dependency "${CMAKE_CURRENT_SOURCE_DIR}/docs/documentation/case_constraints.md") + set(opt_cli_reference_dependency "${CMAKE_CURRENT_SOURCE_DIR}/docs/documentation/cli-reference.md") + set(opt_parameters_dependency "${CMAKE_CURRENT_SOURCE_DIR}/docs/documentation/parameters.md") endif() file(GLOB_RECURSE ${target}_DOCs CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/docs/${target}/*") @@ -719,6 +745,8 @@ if (MFC_DOCUMENTATION) DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${target}-Doxyfile" "${opt_example_dependency}" "${opt_constraints_dependency}" + "${opt_cli_reference_dependency}" + "${opt_parameters_dependency}" "${${target}_SRCs}" "${${target}_DOCs}" COMMAND "${DOXYGEN_EXECUTABLE}" "${target}-Doxyfile" WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" @@ -745,7 +773,7 @@ if (MFC_DOCUMENTATION) ExternalProject_Add(doxygen-awesome-css PREFIX doxygen-awesome-css GIT_REPOSITORY "https://github.com/jothepro/doxygen-awesome-css" - GIT_TAG "df88fe4fdd97714fadfd3ef17de0b4401f804052" + GIT_TAG "v2.4.1" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" @@ -756,7 +784,8 @@ if (MFC_DOCUMENTATION) set(DOXYGEN_HTML_EXTRA_STYLESHEET "\"${theme_dirpath}/doxygen-awesome.css\"\ - \"${theme_dirpath}/doxygen-awesome-sidebar-only.css\"") + \"${theme_dirpath}/doxygen-awesome-sidebar-only.css\"\ + \"${CMAKE_CURRENT_SOURCE_DIR}/docs/custom.css\"") # > Generate Documentation & Landing Page GEN_DOCS(pre_process "MFC: Pre-Process") diff --git a/README.md b/README.md index 9d5d9042d8..19a1425b91 100644 --- a/README.md +++ b/README.md @@ -199,6 +199,52 @@ then you can build MFC and run the test suite! ``` And... you're done! 
+## Toolchain Features + +The `mfc.sh` script provides a comprehensive toolchain for building, running, and testing MFC: + +| Command | Description | +|---------|-------------| +| `./mfc.sh build` | Build MFC and its dependencies | +| `./mfc.sh run case.py` | Run a simulation case | +| `./mfc.sh test` | Run the test suite | +| `./mfc.sh validate case.py` | Check a case file for errors before running | +| `./mfc.sh init my_case` | Create a new case from a template | +| `./mfc.sh clean` | Remove build artifacts | +| `./mfc.sh interactive` | Launch interactive menu-driven interface | + +### Quick Start Workflow + +```bash +./mfc.sh init my_first_case # Create a new case from template +./mfc.sh validate my_first_case/case.py # Validate the case file +./mfc.sh build -j $(nproc) # Build MFC +./mfc.sh run my_first_case/case.py # Run the simulation +``` + +### Case Templates + +Create new cases quickly with built-in templates: + +```bash +./mfc.sh init --list # List available templates +./mfc.sh init my_case -t 2D_minimal # Create 2D case +./mfc.sh init my_case -t example:1D_sodshocktube # Copy from examples +``` + +### Shell Completion + +Enable tab completion for commands and options: + +```bash +# Bash +source toolchain/completions/mfc.bash + +# Zsh (add to fpath) +fpath=(path/to/MFC/toolchain/completions $fpath) +autoload -Uz compinit && compinit +``` + You can learn more about MFC's capabilities [via its documentation](https://mflowcode.github.io/documentation/index.html) or play with the examples located in the `examples/` directory (some are [shown here](https://mflowcode.github.io/documentation/md_examples.html))! The shock-droplet interaction case above was run via diff --git a/benchmarks/5eq_rk3_weno3_hllc/case.py b/benchmarks/5eq_rk3_weno3_hllc/case.py index e3f196d02c..64246812b6 100644 --- a/benchmarks/5eq_rk3_weno3_hllc/case.py +++ b/benchmarks/5eq_rk3_weno3_hllc/case.py @@ -6,7 +6,9 @@ # - weno_order : 3 # - riemann_solver : 2 -import json, math, argparse +import json +import math +import argparse parser = argparse.ArgumentParser(prog="Benchmarking Case 1", description="This MFC case was created for the purposes of benchmarking MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -40,7 +42,7 @@ # the droplet is about D0/8 ISD = 5.0 / 8 * D0 -## pre-shock properties - AIR +# pre-shock properties - AIR # pressure - Pa p0a = patm @@ -57,7 +59,7 @@ # speed of sound - M/s c_a = math.sqrt(gama * (p0a + pia) / rho0a) -## Droplet - WATER +# Droplet - WATER # surface tension - N / m st = 0.00e0 @@ -84,7 +86,7 @@ # Min or psOp0a. Just comment/uncomment appropriately Min = 2.4 -## Pos to pre shock ratios - AIR +# Pos to pre shock ratios - AIR # pressure psOp0a = (Min**2 - 1) * 2 * gama / (gama + 1) + 1 @@ -99,7 +101,7 @@ # shock speed of sound - m/s ss = Ms * c_a -## post-shock - AIR +# post-shock - AIR # pressure - Pa ps = psOp0a * p0a @@ -113,7 +115,7 @@ # velocity at the post shock - m/s vel = c_a / gama * (psOp0a - 1.0) * p0a / (p0a + pia) / Ms -## Domain boundaries - m +# Domain boundaries - m # x direction xb = -8.4707 * D0 @@ -156,7 +158,7 @@ # Save Frequency. 
Note that the number of autosaves will be SF + 1, as the IC (0.dat) is also saved SF = 400 -## making Nt divisible by SF +# making Nt divisible by SF # 1 - ensure NtA goes slightly beyond tendA NtA = int(tendA // dt + 1) diff --git a/benchmarks/hypo_hll/case.py b/benchmarks/hypo_hll/case.py index 5481c1220d..a5e51a6965 100644 --- a/benchmarks/hypo_hll/case.py +++ b/benchmarks/hypo_hll/case.py @@ -4,7 +4,9 @@ # - hypoelasticity : T # - riemann_solver : 1 -import json, math, argparse +import json +import math +import argparse parser = argparse.ArgumentParser(prog="Benchmarking Case 3", description="This MFC case was created for the purposes of benchmarking MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/benchmarks/ibm/case.py b/benchmarks/ibm/case.py index a19b3e0947..e8be264df8 100644 --- a/benchmarks/ibm/case.py +++ b/benchmarks/ibm/case.py @@ -3,7 +3,9 @@ # Additional Benchmarked Features # - ibm : T -import json, math, argparse +import json +import math +import argparse parser = argparse.ArgumentParser(prog="Benchmarking Case 4", description="This MFC case was created for the purposes of benchmarking MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/benchmarks/igr/case.py b/benchmarks/igr/case.py index cb63bb83e6..8d38ad2fa1 100644 --- a/benchmarks/igr/case.py +++ b/benchmarks/igr/case.py @@ -5,7 +5,9 @@ # - viscous : T # - igr_order : 5 -import json, math, argparse +import json +import math +import argparse parser = argparse.ArgumentParser(prog="Benchmarking Case 5", description="This MFC case was created for the purposes of benchmarking MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/benchmarks/viscous_weno5_sgb_acoustic/case.py b/benchmarks/viscous_weno5_sgb_acoustic/case.py index 614698b649..c422fed990 100644 --- a/benchmarks/viscous_weno5_sgb_acoustic/case.py +++ b/benchmarks/viscous_weno5_sgb_acoustic/case.py @@ -8,7 +8,9 @@ # - bubble_model : 3 # - acoustic_source : T -import json, math, argparse +import json +import math +import argparse parser = argparse.ArgumentParser(prog="Benchmarking Case 2", description="This MFC case was created for the purposes of benchmarking MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/docs/custom.css b/docs/custom.css new file mode 100644 index 0000000000..e12863705e --- /dev/null +++ b/docs/custom.css @@ -0,0 +1,133 @@ +/** + * MFC Documentation Custom Styles + * Overrides for doxygen-awesome theme + */ + +/* Fix inline code visibility in colored admonition blocks (warning, attention, important, note, etc.)
*/ + +/* Warning/Attention/Important blocks (red/pink background) */ +dl.warning .tt, dl.attention .tt, dl.important .tt, +dl.warning code, dl.attention code, dl.important code { + background-color: rgba(255, 255, 255, 0.5); + color: #5a0a0f; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Note/Remark blocks (yellow/blue background) */ +dl.note .tt, dl.remark .tt, +dl.note code, dl.remark code { + background-color: rgba(255, 255, 255, 0.5); + color: #3a3000; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Todo blocks (purple background) */ +dl.todo .tt, dl.todo code { + background-color: rgba(255, 255, 255, 0.5); + color: #2a1050; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Bug blocks */ +dl.bug .tt, dl.bug code { + background-color: rgba(255, 255, 255, 0.5); + color: #5a0a0f; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Deprecated blocks */ +dl.deprecated .tt, dl.deprecated code { + background-color: rgba(255, 255, 255, 0.5); + color: #333; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Invariant/Pre/Post blocks */ +dl.invariant .tt, dl.pre .tt, dl.post .tt, +dl.invariant code, dl.pre code, dl.post code { + background-color: rgba(255, 255, 255, 0.5); + color: #0a3a5a; + border-color: rgba(0, 0, 0, 0.15); +} + +/* Dark mode overrides */ +@media (prefers-color-scheme: dark) { + dl.warning .tt, dl.attention .tt, dl.important .tt, + dl.warning code, dl.attention code, dl.important code { + background-color: rgba(0, 0, 0, 0.3); + color: #ffcccc; + border-color: rgba(255, 255, 255, 0.15); + } + + dl.note .tt, dl.remark .tt, + dl.note code, dl.remark code { + background-color: rgba(0, 0, 0, 0.3); + color: #cce5ff; + border-color: rgba(255, 255, 255, 0.15); + } + + dl.todo .tt, dl.todo code { + background-color: rgba(0, 0, 0, 0.3); + color: #e0d0ff; + border-color: rgba(255, 255, 255, 0.15); + } + + dl.bug .tt, dl.bug code { + background-color: rgba(0, 0, 0, 0.3); + color: #ffcccc; + border-color: rgba(255, 255, 255, 0.15); + } + + dl.deprecated .tt, dl.deprecated code { + background-color: rgba(0, 0, 0, 0.3); + color: #d0d0d0; + border-color: rgba(255, 255, 255, 0.15); + } + + dl.invariant .tt, dl.pre .tt, dl.post .tt, + dl.invariant code, dl.pre code, dl.post code { + background-color: rgba(0, 0, 0, 0.3); + color: #cce5ff; + border-color: rgba(255, 255, 255, 0.15); + } +} + +/* doxygen-awesome dark mode class-based detection */ +html.dark-mode dl.warning .tt, html.dark-mode dl.attention .tt, html.dark-mode dl.important .tt, +html.dark-mode dl.warning code, html.dark-mode dl.attention code, html.dark-mode dl.important code { + background-color: rgba(0, 0, 0, 0.3); + color: #ffcccc; + border-color: rgba(255, 255, 255, 0.15); +} + +html.dark-mode dl.note .tt, html.dark-mode dl.remark .tt, +html.dark-mode dl.note code, html.dark-mode dl.remark code { + background-color: rgba(0, 0, 0, 0.3); + color: #cce5ff; + border-color: rgba(255, 255, 255, 0.15); +} + +html.dark-mode dl.todo .tt, html.dark-mode dl.todo code { + background-color: rgba(0, 0, 0, 0.3); + color: #e0d0ff; + border-color: rgba(255, 255, 255, 0.15); +} + +html.dark-mode dl.bug .tt, html.dark-mode dl.bug code { + background-color: rgba(0, 0, 0, 0.3); + color: #ffcccc; + border-color: rgba(255, 255, 255, 0.15); +} + +html.dark-mode dl.deprecated .tt, html.dark-mode dl.deprecated code { + background-color: rgba(0, 0, 0, 0.3); + color: #d0d0d0; + border-color: rgba(255, 255, 255, 0.15); +} + +html.dark-mode dl.invariant .tt, html.dark-mode dl.pre .tt, html.dark-mode dl.post .tt, +html.dark-mode dl.invariant code, html.dark-mode dl.pre code, 
html.dark-mode dl.post code { + background-color: rgba(0, 0, 0, 0.3); + color: #cce5ff; + border-color: rgba(255, 255, 255, 0.15); +} diff --git a/docs/documentation/authors.md b/docs/documentation/authors.md index 42e2867f62..b346486112 100644 --- a/docs/documentation/authors.md +++ b/docs/documentation/authors.md @@ -1,3 +1,5 @@ +@page authors Authors + ## Authors Contributors to MFC since 2019 can be [viewed here](https://github.com/MFlowCode/MFC/graphs/contributors). diff --git a/docs/documentation/case.md b/docs/documentation/case.md index a99ad36049..90e4e5e147 100644 --- a/docs/documentation/case.md +++ b/docs/documentation/case.md @@ -1,3 +1,5 @@ +@page case Case Files + # Case Files Example Python case files, also referred to as *input files*, can be found in the [examples/](https://github.com/MFlowCode/MFC/tree/master/examples) directory. They print a Python dictionary containing input parameters for MFC. Their contents, and a guide to filling them out, are documented in the user manual. A commented, tutorial script @@ -70,12 +72,14 @@ For example, to run the `scaling` case in "weak-scaling" mode: ## Feature Compatibility -Before diving into parameter details, check the **[Feature Compatibility Guide](case_constraints.md)** to understand: +Before diving into parameter details, check the **@ref case_constraints "Feature Compatibility Guide"** to understand: - Which features work together (MHD, bubbles, phase change, etc.) - Common configuration patterns with copy-paste examples - Requirements for each model equation and Riemann solver -💡 **Tip:** If you get a validation error, the compatibility guide explains what each parameter requires. +**Parameter Lookup:** +- CLI search: `./mfc.sh params <query>` - Search ~3,300 parameters from the command line +- Full reference: **@ref parameters "Case Parameters"** - Complete parameter documentation There are multiple sets of parameters that must be specified in the Python input file: 1. [Runtime Parameters](#1-runtime) @@ -377,7 +381,7 @@ The parameters define material's property of compressible fluids that are used i - `fluid_pp(i)%%Re(1)` and `fluid_pp(i)%%Re(2)` define the shear and volume viscosities of $i$-th fluid, respectively. When these parameters are undefined, fluids are treated as inviscid. -Details of implementation of viscosity in MFC can be found in [Coralic (2015)](references.md). +Details of implementation of viscosity in MFC can be found in [Coralic (2015)](@ref references). - `fluid_pp(i)%%cv`, `fluid_pp(i)%%qv`, and `fluid_pp(i)%%qvp` define $c_v$, $q$, and $q'$ as parameters of $i$-th fluid that are used in stiffened gas equation of state. @@ -434,7 +438,7 @@ Details of implementation of viscosity in MFC can be found in [Coralic (2015)](r | `surface_tension` | Logical | Activate surface tension | | `viscous` | Logical | Activate viscosity | | `hypoelasticity` | Logical | Activate hypoelasticity* | -| `igr` | Logical | Enable solution via information geometric regularization (IGR) [Cao (2024)](references.md) | +| `igr` | Logical | Enable solution via information geometric regularization (IGR) [Cao (2024)](@ref references) | | `igr_order` | Integer | Order of reconstruction for IGR [3,5] | | `alf_factor` | Real | Alpha factor for IGR entropic pressure (default 10) | | `igr_pres_lim` | Logical | Limit IGR pressure to avoid negative values (default F) | @@ -448,8 +452,8 @@ Details of implementation of viscosity in MFC can be found in [Coralic (2015)](r The table lists simulation algorithm parameters.
The parameters are used to specify options in algorithms that are used to integrate the governing equations of the multi-component flow based on the initial condition. -Models and assumptions that are used to formulate and discritize the governing equations are described in [Bryngelson et al. (2019)](references.md). -Details of the simulation algorithms and implementation of the WENO scheme can be found in [Coralic (2015)](references.md). +Models and assumptions that are used to formulate and discretize the governing equations are described in [Bryngelson et al. (2019)](@ref references). +Details of the simulation algorithms and implementation of the WENO scheme can be found in [Coralic (2015)](@ref references). - `bc_[x,y,z]%[beg,end]` specifies the boundary conditions at the beginning and the end of domain boundaries in each coordinate direction by a negative integer from -1 through -16. See table [Boundary Conditions](#boundary-conditions) for details. @@ -463,12 +467,12 @@ Tangential velocities require viscosity, `weno_avg = T`, and `bc_[x,y,z]%%beg = Tangential velocities require viscosity, `weno_avg = T`, and `bc_[x,y,z]%%end = -16` to work properly. Normal velocities require `bc_[x,y,z]%%end = -15` or `bc_[x,y,z]%%end = -16` to work properly. - `model_eqns` specifies the choice of the multi-component model that is used to formulate the dynamics of the flow using integers from 1 through 3. -`model_eqns = 1`, `2`, and `3` correspond to $\Gamma$-$\Pi_\infty$ model ([Johnsen, 2008](references.md)), 5-equation model ([Allaire et al., 2002](references.md)), and 6-equation model ([Saurel et al., 2009](references.md)), respectively. -The difference of the two models is assessed by ([Schmidmayer et al., 2019](references.md)). +`model_eqns = 1`, `2`, and `3` correspond to $\Gamma$-$\Pi_\infty$ model ([Johnsen, 2008](@ref references)), 5-equation model ([Allaire et al., 2002](@ref references)), and 6-equation model ([Saurel et al., 2009](@ref references)), respectively. +The difference of the two models is assessed by ([Schmidmayer et al., 2019](@ref references)). Note that some code parameters are only compatible with the 5-equation model. - `alt_soundspeed` activates the source term in the advection equations for the volume fractions, $K\nabla\cdot \underline{u}$, that regularizes the speed of sound in the mixture region when the 5-equation model is used. -The effect and use of the source term are assessed by [Schmidmayer et al., 2019](references.md). +The effect and use of the source term are assessed by [Schmidmayer et al., 2019](@ref references). - `adv_n` activates the direct computation of number density by the Riemann solver instead of computing number density from the void fraction in the method of classes. @@ -477,7 +481,7 @@ The effect and use of the source term are assessed by [Schmidmayer et al., 2019] - `mixture_err` activates correction of solutions to avoid imaginary speed of sound at each grid cell. - `time_stepper` specifies the order of the Runge-Kutta (RK) time integration scheme that is used for temporal integration in simulation, from the 1st to 5th order by corresponding integer. -Note that `time_stepper = 3` specifies the total variation diminishing (TVD), third order RK scheme ([Gottlieb and Shu, 1998](references.md)). +Note that `time_stepper = 3` specifies the total variation diminishing (TVD), third order RK scheme ([Gottlieb and Shu, 1998](@ref references)).
- `adap_dt` activates the Strang operator splitting scheme which splits flux and source terms in time marching, and an adaptive time stepping strategy is implemented for the source term. It requires ``bubbles_euler = 'T'``, ``polytropic = 'T'``, ``adv_n = 'T'`` and `time_stepper = 3`. Additionally, it can be used with ``bubbles_lagrange = 'T'`` and `time_stepper = 3`. `adap_dt_tol` and `adap_dt_max_iters` are 1e-4 and 100, respectively, by default. @@ -486,19 +490,19 @@ Note that `time_stepper = 3` specifies the total variation diminishing (TVD), th - `weno_eps` specifies the lower bound of the WENO nonlinear weights. It is recommended to set `weno_eps` to $10^{-6}$ for WENO-JS, and to $10^{-40}$ for other WENO variants. -- `mapped_weno` activates the WENO-M scheme in place of the default WENO-JS scheme ([Henrick et al., 2005](references.md)). WENO-M a variant of the WENO scheme that remaps the nonlinear WENO-JS weights by assigning larger weights to non-smooth stencils, reducing dissipation compared to the default WENO-JS scheme, at the expense of higher computational cost. Only one of `mapped_weno`, `wenoz`, and `teno` can be activated. +- `mapped_weno` activates the WENO-M scheme in place of the default WENO-JS scheme ([Henrick et al., 2005](@ref references)). WENO-M is a variant of the WENO scheme that remaps the nonlinear WENO-JS weights by assigning larger weights to non-smooth stencils, reducing dissipation compared to the default WENO-JS scheme, at the expense of higher computational cost. Only one of `mapped_weno`, `wenoz`, and `teno` can be activated. -- `wenoz` activates the WENO-Z scheme in place of the default WENO-JS scheme ([Borges et al., 2008](references.md)). WENO-Z is a variant of the WENO scheme that further reduces the dissipation compared to the WENO-M scheme. It has similar computational cost to the WENO-JS scheme. +- `wenoz` activates the WENO-Z scheme in place of the default WENO-JS scheme ([Borges et al., 2008](@ref references)). WENO-Z is a variant of the WENO scheme that further reduces the dissipation compared to the WENO-M scheme. It has similar computational cost to the WENO-JS scheme. - `wenoz_q` specifies the power parameter `q` used in the WENO-Z scheme. It controls how aggressively the smoothness coefficients scale the weights. A higher value of `wenoz_q` increases the sensitivity to smoothness, improving stability but worsening numerical dissipation. For WENO3 and WENO5, `q=1` is fixed, so `wenoz_q` must not be set. For WENO7, `wenoz_q` can be set to 2, 3, or 4. -- `teno` activates the TENO scheme in place of the default WENO-JS scheme ([Fu et al., 2016](references.md)). TENO is a variant of the ENO scheme that is the least dissipative, but could be less robust for extreme cases. It uses a threshold to identify smooth and non-smooth stencils, and applies optimal weights to the smooth stencils. Only available for `weno_order = 5` and `7`. Requires `teno_CT` to be set. Does not support grid stretching. +- `teno` activates the TENO scheme in place of the default WENO-JS scheme ([Fu et al., 2016](@ref references)). TENO is a variant of the ENO scheme that is the least dissipative, but could be less robust for extreme cases. It uses a threshold to identify smooth and non-smooth stencils, and applies optimal weights to the smooth stencils. Only available for `weno_order = 5` and `7`. Requires `teno_CT` to be set. Does not support grid stretching. - `teno_CT` specifies the threshold for the TENO scheme.
This dimensionless constant, also known as $C_T$, sets a threshold to identify smooth and non-smooth stencils. Larger values make the scheme more robust but also more dissipative. A recommended value for teno_CT is `1e-6`. When adjusting this parameter, it is recommended to try values like `1e-5` or `1e-7` for TENO5. A smaller value can be used for TENO7. - `null_weights` activates nullification of the nonlinear WENO weights at the buffer regions outside the domain boundaries when the Riemann extrapolation boundary condition is specified (`bc_[x,y,z]%%beg[end] = -4`). -- `mp_weno` activates monotonicity preservation in the WENO reconstruction (MPWENO) such that the values of reconstructed variables do not reside outside the range spanned by WENO stencil ([Balsara and Shu, 2000](references.md); [Suresh and Huynh, 1997](references.md)). +- `mp_weno` activates monotonicity preservation in the WENO reconstruction (MPWENO) such that the values of reconstructed variables do not reside outside the range spanned by WENO stencil ([Balsara and Shu, 2000](@ref references); [Suresh and Huynh, 1997](@ref references)). - `muscl_order` specifies the order of the MUSCL scheme that is used for spatial reconstruction of variables by an integer of 1 or 2, corresponding to 1st and 2nd order, respectively. When using `muscl_order = 2`, `muscl_lim` must be defined. @@ -508,16 +512,16 @@ It is recommended to set `weno_eps` to $10^{-6}$ for WENO-JS, and to $10^{-40}$ - `int_comp` activates interface compression using THINC in MUSCL reconstruction, with control parameters (`ic_eps` and `ic_beta`). - `riemann_solver` specifies the choice of the Riemann solver that is used in simulation by an integer from 1 through 4. -`riemann_solver = 1`, `2`, and `3` correspond to HLL, HLLC, and Exact Riemann solver, respectively ([Toro, 2013](references.md)). -`riemann_solver = 4` is only for MHD simulations. It resolves 5 of the full seven-wave structure of the MHD equations ([Miyoshi and Kusano, 2005](references.md)). +`riemann_solver = 1`, `2`, and `3` correspond to HLL, HLLC, and Exact Riemann solver, respectively ([Toro, 2013](@ref references)). +`riemann_solver = 4` is only for MHD simulations. It resolves 5 of the 7 waves of the full seven-wave structure of the MHD equations ([Miyoshi and Kusano, 2005](@ref references)). -- `low_Mach` specifies the choice of the low Mach number correction scheme for the HLLC Riemann solver. `low_Mach = 0` is default value and does not apply any correction scheme. `low_Mach = 1` and `2` apply the anti-dissipation pressure correction method ([Chen et al., 2022](references.md)) and the improved velocity reconstruction method ([Thornber et al., 2008](references.md)). This feature requires `model_eqns = 2` or `3`. `low_Mach = 1` works for `riemann_solver = 1` and `2`, but `low_Mach = 2` only works for `riemann_solver = 2`. +- `low_Mach` specifies the choice of the low Mach number correction scheme for the HLLC Riemann solver. `low_Mach = 0` is the default value and does not apply any correction scheme. `low_Mach = 1` and `2` apply the anti-dissipation pressure correction method ([Chen et al., 2022](@ref references)) and the improved velocity reconstruction method ([Thornber et al., 2008](@ref references)). This feature requires `model_eqns = 2` or `3`. `low_Mach = 1` works for `riemann_solver = 1` and `2`, but `low_Mach = 2` only works for `riemann_solver = 2`.
- `avg_state` specifies the choice of the method to compute averaged variables at the cell-boundaries from the left and the right states in the Riemann solver by an integer of 1 or 2. `avg_state = 1` and `2` correspond to Roe- and arithmetic averages, respectively. - `wave_speeds` specifies the choice of the method to compute the left, right, and middle wave speeds in the Riemann solver by an integer of 1 or 2. -`wave_speeds = 1` and `2` correspond to the direct method ([Batten et al., 1997](references.md)), and indirect method that approximates the pressures and velocity ([Toro, 2013](references.md)), respectively. +`wave_speeds = 1` and `2` correspond to the direct method ([Batten et al., 1997](@ref references)), and the indirect method that approximates the pressures and velocity ([Toro, 2013](@ref references)), respectively. - `weno_Re_flux` activates the scalar divergence theorem in computing the velocity gradients using WENO-reconstructed cell boundary values. If this option is false, the velocity gradient is computed using a finite difference scheme of order 2, which is independent of the WENO order. @@ -563,7 +567,7 @@ The value of `dt` needs to be sufficiently small to satisfy the Courant-Friedric `t_step_save` is the time step interval for data output during simulation. To newly start the simulation, set `t_step_start = 0`. -To restart the simulation from $k$-th time step, set `t_step_start = k`; see [Restarting Cases](running.md). +To restart the simulation from the $k$-th time step, set `t_step_start = k`; see @ref running "Restarting Cases". ##### Adaptive Time-Stepping @@ -580,7 +584,7 @@ To restart the simulation from $k$-th time step, see [Re - `t_stop` specifies at what time the simulation should stop. To newly start the simulation, set `n_start = 0`. -To restart the simulation from $k$-th time step, see [Restarting Cases](running.md). +To restart the simulation from the $k$-th time step, see @ref running "Restarting Cases". ### 7. Formatted Output @@ -698,7 +702,7 @@ It also cannot be enabled with `flux_wrt`, `heat_ratio_wrt`, `pres_inf_wrt`, `c_ | `acoustic(i)%%bb_bandwidth` | Real | The bandwidth of each frequency in the broadband wave | | `acoustic(i)%%bb_lowest_freq` | Real | The lower frequency bound of the broadband wave | -Details of the transducer acoustic source model can be found in [Maeda and Colonius (2017)](references.md). +Details of the transducer acoustic source model can be found in [Maeda and Colonius (2017)](@ref references). - `acoustic_source` activates the acoustic source module. @@ -710,7 +714,7 @@ Details of the transducer acoustic source model can be found in [Maeda and Colon - `%%loc(j)` specifies the location of the acoustic source in the $j$-th coordinate direction. For planar support, the location defines the midpoint of the source plane. For transducer arrays, the location defines the center of the transducer or transducer array (not the focal point; for 3D it's the tip of the spherical cap, for 2D it's the tip of the arc). -- `%%pulse` specifies the acoustic wave form. `%%pulse = 1`, `2`, `3` and `4` correspond to sinusoidal wave, Gaussian wave, square wave and broadband wave, respectively. The implementation of the broadband wave is based on [Tam (2005)](references.md) +- `%%pulse` specifies the acoustic wave form. `%%pulse = 1`, `2`, `3` and `4` correspond to sinusoidal wave, Gaussian wave, square wave and broadband wave, respectively.
The implementation of the broadband wave is based on [Tam (2005)](@ref references). - `%%npulse` specifies the number of cycles of the acoustic wave generated. Only applies to `%%pulse = 1 and 3` (sine and square waves), and must be an integer for non-planar waves. @@ -791,7 +795,7 @@ This table lists the sub-grid bubble model parameters, which can be utilized in - `bub_pp` specifies simulation parameters for the EE and/or EL bubble model. -Implementation of the parameters into the model follow [Ando (2010)](references.md). +Implementation of the parameters into the model follows [Ando (2010)](@ref references). #### 9.1 Ensemble-Averaged Bubble Model @@ -812,10 +816,10 @@ Implementation of the parameters into the model follow [Ando (2010)](references. This table lists the ensemble-averaged bubble model parameters. - `polytropic` activates polytropic gas compression in the bubble. -When ``polytropic = 'F'``, the gas compression is modeled as non-polytropic due to heat and mass transfer across the bubble wall with constant heat and mass transfer coefficients based on ([Preston et al., 2007](references.md)). +When ``polytropic = 'F'``, the gas compression is modeled as non-polytropic due to heat and mass transfer across the bubble wall with constant heat and mass transfer coefficients based on ([Preston et al., 2007](@ref references)). - `thermal` specifies a model for heat transfer across the bubble interface by an integer from 1 through 3. -`thermal = 1`, `2`, and `3` correspond to no heat transfer (adiabatic gas compression), isothermal heat transfer, and heat transfer with a constant heat transfer coefficient based on [Preston et al., 2007](references.md), respectively. +`thermal = 1`, `2`, and `3` correspond to no heat transfer (adiabatic gas compression), isothermal heat transfer, and heat transfer with a constant heat transfer coefficient based on [Preston et al., 2007](@ref references), respectively. - `polydisperse` activates polydispersity in the bubble model through a probability density function (PDF) of the equilibrium bubble radius. Simpson's rule is used for integrating the log-normal PDF of equilibrium bubble radius for polydisperse populations. @@ -853,15 +857,15 @@ When ``polytropic = 'F'``, the gas compression is modeled as non-polytropic due - `nBubs_glb` Total number of bubbles. Their initial conditions need to be specified in the ./input/lag_bubbles.dat file. See the example cases for additional information. -- `solver_approach` Specifies the Euler-Lagrange coupling method: [1] enables a one-way coupling approach, where the bubbles do not influence the Eulerian field. [2] activates the two-way coupling approach based on [Maeda and Colonius (2018)](references.md), where the effect of the bubbles is added in the Eulerian field as source terms. +- `solver_approach` Specifies the Euler-Lagrange coupling method: [1] enables a one-way coupling approach, where the bubbles do not influence the Eulerian field. [2] activates the two-way coupling approach based on [Maeda and Colonius (2018)](@ref references), where the effect of the bubbles is added in the Eulerian field as source terms. -- `cluster_type` Specifies method to find p_inf (pressure that drives the bubble dynamics): [1] activates the bilinear interpolation of the pressure field, while [2] enables the bubble dynamic closure based on [Maeda and Colonius (2018)](references.md), the full model is obtained when `pressure_corrector` is true.
+- `cluster_type` Specifies the method to find p_inf (pressure that drives the bubble dynamics): [1] activates the bilinear interpolation of the pressure field, while [2] enables the bubble dynamic closure based on [Maeda and Colonius (2018)](@ref references); the full model is obtained when `pressure_corrector` is true. -- `smooth_type` Specifies the smoothening method of projecting the lagrangian bubbles in the Eulerian field: [1] activates the gaussian kernel function described in [Maeda and Colonius (2018)](references.md), while [2] activates the delta kernel function where the effect of the bubble is only seen in the specific bubble location cell. +- `smooth_type` Specifies the smoothing method of projecting the Lagrangian bubbles in the Eulerian field: [1] activates the Gaussian kernel function described in [Maeda and Colonius (2018)](@ref references), while [2] activates the delta kernel function where the effect of the bubble is only seen in the specific bubble location cell. -- `heatTransfer_model` Activates the heat transfer model at the bubble's interface based on ([Preston et al., 2007](references.md)). +- `heatTransfer_model` Activates the heat transfer model at the bubble's interface based on ([Preston et al., 2007](@ref references)). -- `massTransfer_model` Activates the mass transfer model at the bubble's interface based on ([Preston et al., 2007](references.md)). +- `massTransfer_model` Activates the mass transfer model at the bubble's interface based on ([Preston et al., 2007](@ref references)). ### 10. Velocity Field Setup @@ -953,7 +957,7 @@ By convention, positive accelerations in the `x[y,z]` direction are in the posit - `relativity` only works when `mhd` is enabled and activates relativistic MHD (RMHD) simulation. -- `hyper_cleaning` [Dedner et al., 2002](references.md) only works with `mhd` in 2D/3D and reduces numerical `div B` errors by propagation and damping. Currently not compatible with HLLD (`riemann_solver = 4`). +- `hyper_cleaning` [Dedner et al., 2002](@ref references) only works with `mhd` in 2D/3D and reduces numerical `div B` errors by propagation and damping. Currently not compatible with HLLD (`riemann_solver = 4`). - `hyper_cleaning_speed` sets the propagation speed of divergence-cleaning waves. @@ -978,7 +982,7 @@ Note: For relativistic flow, the conservative and primitive densities are differ | `cont_damage_s` | Real | Power `s` for continuum damage model | | `alpha_bar` | Real | Damage factor (rate) for continuum damage model | -- `cont_damage` activates continuum damage model for solid materials. Requires `tau_star`, `cont_damage_s`, and `alpha_bar` to be set (empirically determined) ([Cao et al., 2019](references.md)). +- `cont_damage` activates the continuum damage model for solid materials. Requires `tau_star`, `cont_damage_s`, and `alpha_bar` to be set (empirically determined) ([Cao et al., 2019](@ref references)). ### 16. Cylindrical Coordinates @@ -1038,7 +1042,7 @@ When ``cyl_coord = 'T'`` is set in 2D the following constraints must be met: The boundary conditions supported by MFC are listed in table [Boundary Conditions](#boundary-conditions). Their number (`#`) corresponds to the input value in `input.py` labeled `bc_[x,y,z]%[beg,end]` (see table [Simulation Algorithm Parameters](#5-simulation-algorithm)). -The entries labeled "Characteristic." are characteristic boundary conditions based on [Thompson (1987)](references.md) and [Thompson (1990)](references.md). +The entries labeled "Characteristic."
are characteristic boundary conditions based on [Thompson (1987)](@ref references) and [Thompson (1990)](@ref references). ### Generalized Characteristic Boundary conditions @@ -1054,7 +1058,7 @@ The entries labeled "Characteristic." are characteristic boundary conditions bas | `bc_[x,y,z]%alpha_rho_in` | Real Array | Inflow density | | `bc_[x,y,z]%alpha_in` | Real Array | Inflow void fraction | -This boundary condition can be used for subsonic inflow (`bc_[x,y,z]%[beg,end]` = -7) and subsonic outflow (`bc_[x,y,z]%[beg,end]` = -8) characteristic boundary conditions. These are based on [Pirozzoli (2013)](references.md). This enables to provide inflow and outflow conditions outside the computational domain. +This boundary condition can be used for subsonic inflow (`bc_[x,y,z]%[beg,end]` = -7) and subsonic outflow (`bc_[x,y,z]%[beg,end]` = -8) characteristic boundary conditions. These are based on [Pirozzoli (2013)](@ref references). They make it possible to impose inflow and outflow conditions from outside the computational domain. ### Patch types @@ -1138,7 +1142,7 @@ The midplane location is [`%%loc(1)`, `%%loc(2)`] and the normal vector is [$\ma - `%%support = 3` specifies a semi-infinite source plane in 3D simulation. The midplane location is [`%%loc(1)`, `%%loc(2)`] and the normal vector is [$\mathrm{cos}$(`%%dir`), $\mathrm{sin}$(`%%dir`)]. The length of the source plane is `%%length`, and the plane is perpendicular to the direction of wave propagation (defined by `%%dir`). Note that the plane is infinite in the $z$-direction, so `%%loc(3)` is not required. -- `%%support = 5` specifies a circular transducer in 2D simulation. The transducer is centered at [`%%loc(1)`, `%%loc(2)`] with a focal length of `%%foc_length` and an aperture of `%%aperture`. The center location is not the focal point; it is the tip of the circular arc (intersection of the arc and the x-axis). The aperture is the length of the projection of the circular arc onto the y-axis. If a semi-circle is desired, set the aperture to double the focal length. Note that this is physically a cylindrical transducer, and due to the complexity of Green's function for 2D wave, no closed-form solution is available for the 2D circular transducer, and an approximate is used (see [Maeda and Colonius (2017)](references.md) for details). For the mass source term correction factor, the theoretical approximation factor of -0.5 in ($r_{foc}^{-0.5}$) is replaced by an empirically determined factor of -0.85. +- `%%support = 5` specifies a circular transducer in 2D simulation. The transducer is centered at [`%%loc(1)`, `%%loc(2)`] with a focal length of `%%foc_length` and an aperture of `%%aperture`. The center location is not the focal point; it is the tip of the circular arc (intersection of the arc and the x-axis). The aperture is the length of the projection of the circular arc onto the y-axis. If a semi-circle is desired, set the aperture to double the focal length. Note that this is physically a cylindrical transducer, and due to the complexity of Green's function for a 2D wave, no closed-form solution is available for the 2D circular transducer, so an approximation is used (see [Maeda and Colonius (2017)](@ref references) for details). For the mass source term correction factor, the theoretical approximation factor of -0.5 in ($r_{foc}^{-0.5}$) is replaced by an empirically determined factor of -0.85. - `%%support = 6` specifies a spherical transducer in 2D axisymmetric simulation. It is identical to `%%support = 5` in terms of simulation parameters.
Note that this is physically a spherical 3D transducer, so the equation is exact. diff --git a/docs/documentation/docker.md b/docs/documentation/docker.md index 16e41c06f0..8a207649cc 100644 --- a/docs/documentation/docker.md +++ b/docs/documentation/docker.md @@ -1,3 +1,5 @@ +@page docker Containers + # Containers ## Navigating Docker @@ -38,8 +40,8 @@ docker run -it --rm --entrypoint bash --platform linux/amd64 sbryngelson/mfc:lat **What's Next?** Once a container has started, the primary working directory is `/opt/MFC`, and all necessary files are located there. -You can check out the usual MFC documentation, such as the [Example Cases](examples.md), to get familiar with running cases. -Then, review the [Case Files](case.md) to write a custom case file. +You can check out the usual MFC documentation, such as the @ref examples "Example Cases", to get familiar with running cases. +Then, review the @ref case "Case Files" to write a custom case file. ## Details on Running Containers diff --git a/docs/documentation/expectedPerformance.md b/docs/documentation/expectedPerformance.md index d01ddee7fa..e5daec9496 100644 --- a/docs/documentation/expectedPerformance.md +++ b/docs/documentation/expectedPerformance.md @@ -1,9 +1,95 @@ +@page expectedPerformance Performance + # Performance +This page covers how to achieve maximum performance with MFC, including optimization techniques and benchmark results across various hardware platforms. + +--- + +## Achieving Maximum Performance + +### Case Optimization (Recommended) + +The single most impactful optimization is **case optimization**, which can provide **up to 10x speedup** for both CPU and GPU runs. + +Case optimization works by hard-coding your simulation parameters at compile time, enabling aggressive compiler optimizations (loop unrolling, constant propagation, dead code elimination). + +**Basic usage:** +```shell +./mfc.sh run case.py --case-optimization -j 8 +``` + +This automatically: +1. Generates optimized source code with your parameters hard-coded +2. Builds an optimized binary for your specific case +3. Runs the simulation with the optimized binary +4. Caches the build for repeated runs with the same parameters + +**For batch jobs:** +```shell +./mfc.sh run case.py --case-optimization -j 8 -e batch -N 4 -n 8 +``` + +**Build separately (optional):** +```shell +./mfc.sh build -i case.py --case-optimization -j 8 +./mfc.sh run case.py +``` + +#### When to use case optimization + +| Use Case | Recommended? | +|----------|--------------| +| Production simulations | **Yes** | +| Large-scale HPC runs | **Yes** | +| Benchmarking | **Yes** | +| Rapid iteration/debugging | No (rebuilds on parameter changes) | +| Parameter sweeps | No (many different configurations) | + +### Other Optimization Flags + +| Flag | Description | +|------|-------------| +| `--gpu` | Enable GPU acceleration | +| `-j N` | Parallel build with N threads | +| `--fastmath` | Faster (less precise) floating-point math | + +### Profiling for Optimization + +Use profiling tools to identify bottlenecks: + +**NVIDIA GPUs:** +```shell +./mfc.sh run case.py --nsys # Timeline profiling (Nsight Systems) +./mfc.sh run case.py --ncu # Kernel profiling (Nsight Compute) +``` + +**AMD GPUs:** +```shell +./mfc.sh run case.py --rsys # Timeline profiling (rocprof-systems) +./mfc.sh run case.py --rcu # Kernel profiling (rocprof-compute) +``` + +See @ref running "Running" for detailed profiling instructions. + +### Performance Checklist + +Before running large simulations: + +1. 
**Build with optimization:** Use `--case-optimization` for production runs +2. **Use GPU acceleration:** Build with `--gpu` on GPU systems +3. **Match ranks to hardware:** One MPI rank per GPU, or match CPU cores +4. **Verify GPU usage:** Check with `nvidia-smi` or `rocm-smi` during runs +5. **Profile first:** Run a short simulation with profiling to identify issues + +--- + +## Benchmark Results + MFC has been benchmarked on several CPUs and GPU devices. -This page is a summary of these results. +This section summarizes these results. -## Figure of merit: Grind time performance +### Figure of merit: Grind time performance The following table outlines observed performance as nanoseconds per grid point (ns/gp) per equation (eq) per right-hand side (rhs) evaluation (lower is better), also known as the grind time. We solve an example 3D, inviscid, 5-equation model problem with two advected species (8 PDEs) and 8M grid points (158-cubed uniform grid). diff --git a/docs/documentation/getting-started.md b/docs/documentation/getting-started.md index a2e1990455..47befe309f 100644 --- a/docs/documentation/getting-started.md +++ b/docs/documentation/getting-started.md @@ -1,3 +1,5 @@ +@page getting-started Getting Started + # Getting Started ## Fetching MFC @@ -139,13 +141,13 @@ MFC can be built with support for various (compile-time) features: | Feature | Enable | Disable | Default | Description | | :----------------: | :---------: | :------------: | :-----: | --------------------------------------------------------------- | | **MPI** | `--mpi` | `--no-mpi` | On | Allows MFC to run on multiple processors (and nodes). | -| **GPU** | `--gpu` | `--no-gpu` | Off | Enables GPU acceleration via OpenACC. | +| **GPU** | `--gpu` | `--no-gpu` | Off | Enables GPU acceleration via OpenACC or OpenMP offload. | | **Debug** | `--debug` | `--no-debug` | Off | Requests the compiler build MFC in debug mode. | | **GCov** | `--gcov` | `--no-gcov` | Off | Build MFC with coverage flags on. | | **Unified Memory** | `--unified` | `--no-unified` | Off | Build MFC with unified CPU/GPU memory (GH200 superchip only) | -| **Single** | `--single` | `--no-single` | Off | Build MFC in single precision +| **Single** | `--single` | `--no-single` | Off | Build MFC in single precision | -_⚠️ The `--gpu` option requires that your compiler supports OpenACC for Fortran for your target GPU architecture._ +_⚠️ The `--gpu` option requires a supported compiler: NVHPC for NVIDIA GPUs (OpenACC or OpenMP), Cray for AMD GPUs (OpenACC or OpenMP), or AMD compilers for AMD GPUs (OpenMP only)._ When these options are given to `mfc.sh`, they will be remembered when you issue future commands. You can enable and disable features anytime by passing any of the arguments above. @@ -173,7 +175,7 @@ In brief, you can run the latest MFC container: ```bash docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu ``` -Please refer to the [Docker](docker.md) document for more information. +Please refer to the @ref docker "Docker" document for more information. ## Running the Test Suite @@ -182,7 +184,7 @@ Run MFC's test suite with 8 threads: ./mfc.sh test -j 8 ``` -Please refer to the [Testing](testing.md) document for more information. +Please refer to the @ref testing "Testing" document for more information. ## Running an Example Case @@ -192,4 +194,42 @@ MFC has example cases in the `examples` folder. 
You can run such a case interactively using 2 computational processes by running: ```shell ./mfc.sh run examples/2D_shockbubble/case.py -n 2 ``` -Please refer to the [Running](running.md) document for more information on `case.py` files and how to run them. +Please refer to the @ref running "Running" document for more information on `case.py` files and how to run them. + +## Helpful Tools + +### Parameter Lookup + +MFC has over 3,000 case parameters. Use the `params` command to search and explore them: + +```shell +./mfc.sh params dt # Search for parameters matching "dt" +./mfc.sh params -d dt # Show parameter with description +./mfc.sh params patch # Find all patch-related parameters +./mfc.sh params --family # List all parameter families +``` + +### Creating a New Case + +Generate a case file template to get started quickly: + +```shell +./mfc.sh new my_case.py # Create a new case file from template +``` + +### Shell Completion + +Enable tab-completion for `./mfc.sh` commands: + +**Bash** (add to `~/.bashrc`): +```bash +source /path/to/MFC/toolchain/completions/mfc.bash +``` + +**Zsh** (add to `~/.zshrc`): +```zsh +fpath=(/path/to/MFC/toolchain/completions $fpath) +autoload -Uz compinit && compinit +``` + +After reloading your shell, press Tab to complete commands and options. diff --git a/docs/documentation/gpuParallelization.md b/docs/documentation/gpuParallelization.md index af9da2faf4..cc67923e60 100644 --- a/docs/documentation/gpuParallelization.md +++ b/docs/documentation/gpuParallelization.md @@ -1,3 +1,5 @@ +@page gpuParallelization GPU Parallelization + # GPU Parallelization MFC compiles GPU code via OpenACC and, in the future, OpenMP as well. diff --git a/docs/documentation/papers.md b/docs/documentation/papers.md index 51b7629f09..1b5fd604cb 100644 --- a/docs/documentation/papers.md +++ b/docs/documentation/papers.md @@ -1,3 +1,5 @@ +@page papers Papers + # Papers MFC 5.0: An exascale many-physics flow solver. [Wilfong, B., Le Berre, H., Radhakrishnan, A., Gupta, A., Vaca-Revelo, D., Adam, D., Yu, H., Lee, H., Chreim, J. R., Carcana Barbosa, M., Zhang, Y., Cisneros-Garibay, E., Gnanaskandan, A., Rodriguez Jr., M., Budiardja, R. D., Abbott, S., Colonius, T., & Bryngelson, S. H. (2025) MFC 5.0: An exascale many-physics flow solver. arXiv:2503.07953. Equal contribution.](https://doi.org/10.48550/arXiv.2503.07953) diff --git a/docs/documentation/readme.md b/docs/documentation/readme.md index 4ce5b6b6e1..3891c363f4 100644 --- a/docs/documentation/readme.md +++ b/docs/documentation/readme.md @@ -1,23 +1,34 @@ -# Documentation +@mainpage MFC Documentation -## User Documentation +Welcome to the Multi-component Flow Code (MFC) documentation.
-- [Getting Started](getting-started.md) -- [Testing](testing.md) -- [Case Files](case.md) -- [Example Cases](examples.md) -- [Running](running.md) -- [Flow Visualization](visualization.md) -- [Performance](expectedPerformance.md) -- [GPU Offloading](gpuParallelization.md) -- [Authors](authors.md) -- [References](references.md) +## Getting Started -## Code/API Documentation +- @ref getting-started "Getting Started" - Installation and first steps +- @ref running "Running" - How to run simulations (interactive, batch, GPU) +- @ref case "Case Files" - Setting up input files +- @ref testing "Testing" - Running the test suite -MFC's three codes have their own documentation: +## Reference -- [Pre-Process](../pre_process/) -- [Simulation](../simulation/) -- [Post-Process](../post_process/) - +- @ref parameters "Case Parameters" - All ~3,400 parameters +- @ref cli-reference "CLI Reference" - Command line options +- @ref case_constraints "Case Creator Guide" - Feature compatibility + +## Examples & Visualization + +- @ref examples "Example Cases" - Sample simulations +- @ref visualization "Flow Visualization" - Post-processing + +## Advanced Topics + +- @ref expectedPerformance "Performance" - Optimization and benchmarks +- @ref gpuParallelization "GPU Parallelization" - GPU macro API (developer reference) +- @ref docker "Containers" - Docker usage +- @ref troubleshooting "Troubleshooting" - Debugging and common issues + +## About + +- @ref papers "Papers" - Publications using MFC +- @ref references "References" - Bibliography +- @ref authors "Authors" - Contributors diff --git a/docs/documentation/references.md b/docs/documentation/references.md index 3e8db35b52..184dd82b0b 100644 --- a/docs/documentation/references.md +++ b/docs/documentation/references.md @@ -1,3 +1,5 @@ +@page references References + # References - Allaire, G., Clerc, S., and Kokh, S. (2002). A five-equation model for the simulation of interfaces between compressible fluids. Journal of Computational Physics, 181(2):577–616. diff --git a/docs/documentation/running.md b/docs/documentation/running.md index 4270f089dd..112d246d82 100644 --- a/docs/documentation/running.md +++ b/docs/documentation/running.md @@ -1,3 +1,5 @@ +@page running Running + # Running MFC can be run using `mfc.sh`'s `run` command. @@ -41,6 +43,8 @@ mfc -n 2 Please refer to `./mfc.sh run -h` for a complete list of arguments and options, along with their defaults. +--- + ## Interactive Execution To run all stages of MFC, that is [pre_process](https://github.com/MFlowCode/MFC/tree/master/src/pre_process/), [simulation](https://github.com/MFlowCode/MFC/tree/master/src/simulation/), and [post_process](https://github.com/MFlowCode/MFC/tree/master/src/post_process/) on the sample case [2D_shockbubble](https://github.com/MFlowCode/MFC/tree/master/examples/2D_shockbubble/), @@ -69,69 +73,225 @@ using 4 cores: ./mfc.sh run examples/2D_shockbubble/case.py -t simulation post_process -n 4 ``` -## Batch Execution +--- -The MFC detects which scheduler your system is using and handles the creation and execution of batch scripts. -The batch engine is requested via the `-e batch` option. -The number of nodes can be specified with the `-N` (i.e., `--nodes`) option. +## Running on GPUs + +MFC supports GPU acceleration via OpenACC (default) or OpenMP offloading. +This section covers how to build and run MFC on GPU systems. 
+ +### Building with GPU Support + +First, build MFC with GPU support enabled: + +```shell +# Using OpenACC (default, recommended for NVIDIA) +./mfc.sh build --gpu + +# Explicitly specify OpenACC +./mfc.sh build --gpu acc + +# Using OpenMP offloading (alternative) +./mfc.sh build --gpu mp +``` -We provide a list of (baked-in) submission batch scripts in the `toolchain/templates` folder. +On HPC clusters, load the appropriate modules first: ```shell -./mfc.sh run examples/2D_shockbubble/case.py -e batch -N 2 -n 4 -t simulation -c <computer> +source ./mfc.sh load -c <cluster> -m g # 'g' for GPU mode +./mfc.sh build --gpu -j $(nproc) ``` -Other useful arguments include: + +### Running on GPUs + +Run simulations with GPU support: + +```shell +# Basic GPU run +./mfc.sh run case.py --gpu + +# Specify GPU IDs (useful for multi-GPU nodes) +./mfc.sh run case.py --gpu -g 0 1 2 3 -- `-# <name>` to name your job. (i.e., `--name`) -- `-@ sample@example.com` to receive emails from the scheduler. (i.e., `--email`) -- `-w hh:mm:ss` to specify the job's maximum allowed walltime. (i.e., `--walltime`) -- `-a <account>` to identify the account to be charged for the job. (i.e., `--account`) -- `-p <partition>` to select the job's partition. (i.e., `--partition`) + +# Run with 4 MPI ranks (typically one per GPU) +./mfc.sh run case.py -n 4 --gpu +``` -As an example, one might request GPUs on a SLURM system using the following: + +### Supported Compilers -**Disclaimer**: IBM's JSRUN on LSF-managed computers does not use the traditional node-based approach to -allocate resources. Therefore, the MFC constructs equivalent resource sets in the task and GPU count. +| Vendor | Compiler | OpenACC | OpenMP | +|--------|----------|---------|--------| +| NVIDIA | NVHPC (nvfortran) | Yes | Yes | +| AMD | Cray (ftn) | Yes | Yes | +| AMD | AMD (amdflang) | No | Yes | -### GPU Profiling + +### Environment Setup + +Most HPC systems require loading GPU-specific modules: + +**NVIDIA systems:** +```shell +module load nvhpc cuda +# Or use MFC's module loader: +source ./mfc.sh load -c phoenix -m g +``` + +**AMD systems:** +```shell +module load rocm amdflang +# Or use MFC's module loader: +source ./mfc.sh load -c frontier -m g +``` + +### Verifying GPU Detection + +Check that GPUs are visible before running: + +```shell +# NVIDIA +nvidia-smi + +# AMD +rocm-smi +``` + +To force GPU usage (fails fast if no GPU available): +```shell +export OMP_TARGET_OFFLOAD=MANDATORY +./mfc.sh run case.py --gpu +``` + +### GPU Profiling + +MFC integrates with vendor profiling tools for performance analysis. #### NVIDIA GPUs -MFC provides two different arguments to facilitate profiling with NVIDIA Nsight. -**Please ensure the used argument is placed at the end so their respective flags can be appended.** -- Nsight Systems (Nsys): `./mfc.sh run ... -t simulation --nsys [nsys flags]` allows one to visualize MFC's system-wide performance with [NVIDIA Nsight Systems](https://developer.nvidia.com/nsight-systems). -NSys is best for understanding the order and execution times of major subroutines (WENO, Riemann, etc.) in MFC. -When used, `--nsys` will run the simulation and generate `.nsys-rep` files in the case directory for all targets. -These files can then be imported into Nsight System's GUI, which can be downloaded [here](https://developer.nvidia.com/nsight-systems/get-started#latest-Platforms). To keep the report files small, it is best to run case files with a few timesteps. Learn more about NVIDIA Nsight Systems [here](https://docs.nvidia.com/nsight-systems/UserGuide/index.html).
-- Nsight Compute (NCU): `./mfc.sh run ... -t simulation --ncu [ncu flags]` allows one to conduct kernel-level profiling with [NVIDIA Nsight Compute](https://developer.nvidia.com/nsight-compute). -NCU provides profiling information for every subroutine called and is more detailed than NSys. -When used, `--ncu` will output profiling information for all subroutines, including elapsed clock cycles, memory used, and more after the simulation is run. -Adding this argument will significantly slow the simulation and should only be used on case files with a few timesteps. -Learn more about NVIDIA Nsight Compute [here](https://docs.nvidia.com/nsight-compute/NsightCompute/index.html). +**Nsight Systems** (timeline/system-level profiling): +```shell +./mfc.sh run case.py -t simulation --nsys [nsys flags] +``` +- Best for understanding execution order and timing of major subroutines (WENO, Riemann, etc.) +- Generates `.nsys-rep` files in the case directory +- Open with [NVIDIA Nsight Systems GUI](https://developer.nvidia.com/nsight-systems/get-started) +- Use few timesteps to keep report files small +**Nsight Compute** (kernel-level profiling): +```shell +./mfc.sh run case.py -t simulation --ncu [ncu flags] +``` +- Detailed per-kernel metrics: cycles, memory usage, occupancy +- Significantly slower than normal execution +- Use only with few timesteps #### AMD GPUs -- Rocprof Systems (RSYS): `./mfc.sh run ... -t simulation --rsys --hip-trace [rocprof flags]` allows one to visualize MFC's system-wide performance with [Perfetto UI](https://ui.perfetto.dev/). -When used, `--roc` will run the simulation and generate files in the case directory for all targets. -`results.json` can then be imported in [Perfetto's UI](https://ui.perfetto.dev/). -Learn more about AMD Rocprof [here](https://rocm.docs.amd.com/projects/rocprofiler/en/docs-5.5.1/rocprof.html) -It is best to run case files with few timesteps to keep the report file sizes manageable. -- Rocprof Compute (RCU): `./mfc.sh run ... -t simulation --rcu -n [rocprof-compute flags]` allows one to conduct kernel-level profiling with [ROCm Compute Profiler](https://rocm.docs.amd.com/projects/rocprofiler-compute/en/latest/what-is-rocprof-compute.html). -When used, `--rcu` will output profiling information for all subroutines, including rooflines, cache usage, register usage, and more, after the simulation is run. -Adding this argument will moderately slow down the simulation and run the MFC executable several times. -For this reason, it should only be used with case files with few timesteps. + +**rocprof-systems** (timeline profiling): +```shell +./mfc.sh run case.py -t simulation --rsys --hip-trace [rocprof flags] +``` +- Generates files viewable in [Perfetto UI](https://ui.perfetto.dev/) +- Use few timesteps for manageable file sizes + +**rocprof-compute** (kernel-level profiling): +```shell +./mfc.sh run case.py -t simulation --rcu -n [rocprof-compute flags] +``` +- Provides rooflines, cache usage, register usage +- Runs the executable multiple times (moderately slower) + +> [!NOTE] +> Place profiling arguments at the end of the command so their flags can be appended. + +--- + +## Batch Execution + +MFC detects which scheduler your system is using and handles the creation and execution of batch scripts. +Use batch mode for running on HPC clusters with job schedulers (SLURM, PBS, LSF).
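For orientation, the kind of script MFC generates and submits on a SLURM machine is sketched below. This is illustrative only, not one of MFC's actual templates (those live in `toolchain/templates/`); the comments map each directive to the options listed in the next subsections, and the account/partition values are hypothetical.

```shell
#!/usr/bin/env bash
# Illustrative sketch of a generated SLURM submission script; the real
# templates in toolchain/templates/ differ per cluster.
#SBATCH --job-name=MFC          # -# / --name
#SBATCH --nodes=2               # -N / --nodes
#SBATCH --ntasks-per-node=4     # -n / --tasks-per-node
#SBATCH --time=02:00:00         # -w / --walltime
#SBATCH --account=myproject     # -a / --account (hypothetical value)
#SBATCH --partition=compute     # -p / --partition (hypothetical value)

# ... the requested targets (pre_process, simulation, post_process) run here
```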
+
+### Basic Usage
+
+```shell
+./mfc.sh run case.py -e batch -N 2 -n 4 -c <computer>
+```
+
+### Batch Options
+
+| Option | Long Form | Description |
+|--------|-----------|-------------|
+| `-e batch` | `--engine batch` | Enable batch submission (required) |
+| `-N` | `--nodes` | Number of nodes to request |
+| `-n` | `--tasks-per-node` | MPI ranks per node |
+| `-w` | `--walltime` | Maximum job time (HH:MM:SS) |
+| `-a` | `--account` | Account/allocation to charge |
+| `-p` | `--partition` | Queue/partition name |
+| `-q` | `--qos` | Quality of service level |
+| `-@` | `--email` | Email for job notifications |
+| `-#` | `--name` | Job name (default: MFC) |
+| `-c` | `--computer` | Submission template to use |
+
+### Examples
+
+**Basic 4-node job:**
+```shell
+./mfc.sh run case.py -e batch -N 4 -n 8 -w 02:00:00
+```
+
+**Job with account and email:**
+```shell
+./mfc.sh run case.py -e batch -N 2 -a myproject -@ user@example.com
+```
+
+**GPU job on Frontier:**
+```shell
+./mfc.sh run case.py -e batch -N 4 -n 8 -c frontier --gpu -w 01:00:00
+```
+
+**Dry run (preview script without submitting):**
+```shell
+./mfc.sh run case.py -e batch -N 2 --dry-run
+```
+
+**Wait for job completion:**
+```shell
+./mfc.sh run case.py -e batch -N 2 --wait
+```
+
+### Computer Templates
+
+MFC includes pre-configured templates in `toolchain/templates/` for many clusters.
+Use `-c <name>` to select one:
+
+```shell
+./mfc.sh run case.py -e batch -c phoenix   # Georgia Tech Phoenix
+./mfc.sh run case.py -e batch -c frontier  # OLCF Frontier
+./mfc.sh run case.py -e batch -c delta     # NCSA Delta
+```
+
+If no template exists for your cluster, use `-c default` and customize as needed,
+or contribute a new template.
+
+### Scheduler Notes
+
+**SLURM systems:**
+Most clusters use SLURM. MFC automatically generates appropriate `sbatch` scripts.
+
+**LSF systems (e.g., Summit):**
+IBM's JSRUN does not use the traditional node-based approach. MFC constructs equivalent resource sets for task and GPU counts.
+
+---
 
-### Restarting Cases
+## Restarting Cases
 
 When running a simulation, MFC generates a `./restart_data` folder in the case directory that contains `lustre_*.dat` files that can be used to restart a simulation from saved timesteps.
 This allows a user to simulate some timestep $X$, then continue it to run to another timestep $Y$, where $Y > X$.
 The user can also choose to add new patches at the intermediate timestep.
 
-If you want to restart a simulation, 
+If you want to restart a simulation,
 
-- For a simulation that uses a constant time step set up the initial case file with: 
+- For a simulation that uses a constant time step, set up the initial case file with:
   - `t_step_start` : $t_i$
   - `t_step_stop` : $t_f$
   - `t_step_save` : $SF$
 in which $t_i$ is the starting time, $t_f$ is the final time, and $SF$ is the saving frequency time.
@@ -146,7 +306,7 @@ If you want to restart a simulation,
 - When the simulation stops, choose any Lustre file as the restarting point (lustre_ $t_s$.dat)
 - Create a new duplicate input file (e.g., `restart_case.py`), which should have:
 
-1. For the Computational Domain Parameters 
+1. For the Computational Domain Parameters
   - Have the following removed __except__ `m`, `n`, and `p`:
     - All domain/mesh information
       - `(xyz)_domain%beg`
@@ -178,22 +338,22 @@ If you want to restart a simulation,
     - `patch_icpp(2)%all variables`
     - `patch_icpp(num_patches)%all variables`
   - Add information about new patches that will be introduced, if any. The parameter num_patches should reflect this addition.
-  - e.g.
-`patch_icpp(1)%some variables of interest`
+  - e.g. `patch_icpp(1)%some variables of interest`
 
-4. For Fluid properties 
+4. For Fluid properties
 
   - Keep information about the fluid properties
 
 - Run pre-process and simulation on `restart_case.py`
   - `./mfc.sh run restart_case.py -t pre_process simulation `
- 
+
 - Run the post_process
   - There are several ways to do this. Keep in mind that, regardless of the .py file used, the post_process command will generate output files in the [`t_step_start`, `t_step_stop`] range, with `t_step_save` as the spacing between files.
-  - One way is to set `t_step_stop` to the restarting point $t_s$ in `case.py`. Then, run the commands below. The first command will run on timesteps $[t_i, t_s]$. The second command will run on $[t_s, t_{f2}]$. Therefore, the whole range $[t_i, t_{f2}]$ will be post processed.
+  - One way is to set `t_step_stop` to the restarting point $t_s$ in `case.py`. Then, run the commands below. The first command will run on timesteps $[t_i, t_s]$. The second command will run on $[t_s, t_{f2}]$. Therefore, the whole range $[t_i, t_{f2}]$ will be post-processed.
 
 ```shell
 ./mfc.sh run case.py -t post_process
 ./mfc.sh run restart_case.py -t post_process
-``` 
+```
 
 We have provided an example, `case.py` and `restart_case.py` in `/examples/1D_vacuum_restart/`. This simulation is a duplicate of the `1D_vacuum` case. It demonstrates stopping at timestep 7000, adding a new patch, and restarting the simulation. To test this code, run:
 
@@ -204,7 +364,9 @@ We have provided an example, `case.py` and `restart_case.py` in `/examples/1D_va
 ./mfc.sh run examples/1D_vacuum_restart/restart_case.py -t post_process
 ```
 
-### Example Runs
+---
+
+## Example Runs
 
 - Oak Ridge National Laboratory's [Summit](https://www.olcf.ornl.gov/summit/):
 
diff --git a/docs/documentation/testing.md b/docs/documentation/testing.md
index e139c5b201..ffb5ee3402 100644
--- a/docs/documentation/testing.md
+++ b/docs/documentation/testing.md
@@ -1,3 +1,5 @@
+@page testing Testing
+
 ## Testing
 
 To run MFC's test suite, run
diff --git a/docs/documentation/troubleshooting.md b/docs/documentation/troubleshooting.md
new file mode 100644
index 0000000000..9a2bfa1541
--- /dev/null
+++ b/docs/documentation/troubleshooting.md
@@ -0,0 +1,480 @@
+@page troubleshooting Troubleshooting Guide
+
+# Troubleshooting Guide
+
+This guide covers debugging tools, common issues, and troubleshooting workflows for MFC.
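+
+As a quick example, a typical first pass over a failing run, using only commands
+documented in this guide, looks like:
+
+```bash
+./mfc.sh validate case.py     # catch case-file errors before anything else
+./mfc.sh build --debug -vv    # rebuild with debug symbols and full compiler output
+./mfc.sh run case.py -n 1 -v  # re-run on a single rank, verbosely
+```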
+
+## Quick Reference
+
+### Debugging Flags
+
+| Flag | Command | Purpose |
+|------|---------|---------|
+| `-v` | build, run, test | Show commands being executed |
+| `-vv` | build, run, test | Full compiler/cmake output |
+| `-vvv` | build | Add cmake dependency debugging |
+| `-d` | all | Write debug log to file |
+| `--debug` | build | Build with debug symbols |
+| `--gcov` | build | Build with code coverage |
+| `--no-gpu` | build | Disable GPU to isolate issues |
+| `--no-mpi` | build | Disable MPI to isolate issues |
+
+### Profiling Flags
+
+| Flag | Command | Purpose |
+|------|---------|---------|
+| `--ncu` | run | NVIDIA Nsight Compute (kernel profiling) |
+| `--nsys` | run | NVIDIA Nsight Systems (timeline profiling) |
+| `--rcu` | run | AMD rocprof-compute |
+| `--rsys` | run | AMD rocprof |
+
+### Useful Commands
+
+```bash
+./mfc.sh validate case.py    # Check case for errors before running
+./mfc.sh build --debug       # Build with debug symbols
+./mfc.sh build -vv           # Build with full compiler output
+./mfc.sh run case.py -v      # Run with verbose output
+./mfc.sh test --only <UUID>  # Run a specific test
+./mfc.sh clean               # Clean and start fresh
+```
+
+---
+
+## Debugging Workflow
+
+### Build Fails?
+
+```
+1. Run with -vv to see full error output
+   ./mfc.sh build -vv
+
+2. If dependency issue, try -vvv for cmake debug
+   ./mfc.sh build -vvv
+
+3. Try disabling features to isolate:
+   ./mfc.sh build --no-gpu   # Rule out GPU issues
+   ./mfc.sh build --no-mpi   # Rule out MPI issues
+
+4. Clean and rebuild:
+   ./mfc.sh clean && ./mfc.sh build
+```
+
+### Run Fails?
+
+```
+1. Validate your case first:
+   ./mfc.sh validate case.py
+
+2. Run with verbose output:
+   ./mfc.sh run case.py -v
+
+3. Check for NaN or constraint errors in output
+
+4. Try with fewer MPI ranks:
+   ./mfc.sh run case.py -n 1
+
+5. Build in debug mode for better error messages:
+   ./mfc.sh build --debug
+   ./mfc.sh run case.py
+```
+
+### Test Fails?
+
+```
+1. Run the specific failing test:
+   ./mfc.sh test --only <UUID>
+
+2. Run serially for clearer output:
+   ./mfc.sh test --only <UUID> -j 1
+
+3. Check test output in:
+   build/tests/<UUID>/
+
+4. If changes are intentional, regenerate golden files:
+   ./mfc.sh test --generate --only <UUID>
+```
+
+---
+
+## Build Debugging
+
+### Verbosity Levels
+
+MFC supports three verbosity levels for builds:
+
+**`-v` (Level 1):** Shows build progress with file counts
+```bash
+./mfc.sh build -v
+# Shows: [1/42] Compiling m_global_parameters.fpp.f90
+```
+
+**`-vv` (Level 2):** Full compiler commands and cmake output
+```bash
+./mfc.sh build -vv
+# Shows: Full compiler invocations with all flags
+# Useful for: Seeing exact compiler errors, checking flags
+```
+
+**`-vvv` (Level 3):** Adds cmake dependency debugging
+```bash
+./mfc.sh build -vvv
+# Shows: cmake --debug-find output
+# Useful for: Finding why cmake can't locate MPI, HDF5, etc.
+```
+
+### Debug Builds
+
+Build with debug symbols for better error messages and debugging:
+
+```bash
+./mfc.sh build --debug
+```
+
+This sets `CMAKE_BUILD_TYPE=Debug`, which:
+- Adds `-g` debug symbols
+- Reduces optimization (`-O0` or `-O1`)
+- Enables runtime checks in some compilers
+- Makes stack traces more readable
+
+### Code Coverage
+
+Build with code coverage instrumentation:
+
+```bash
+./mfc.sh build --gcov
+```
+
+Useful for identifying which code paths are exercised by tests.
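+
+MFC itself only produces the instrumented build; turning the resulting coverage
+counters (`.gcda`/`.gcno` files in the build tree) into a readable report is left
+to standard tooling. A minimal sketch, assuming a gcc/gfortran build and the
+third-party `gcovr` package (not bundled with MFC):
+
+```bash
+pip install gcovr                     # third-party reporting tool
+./mfc.sh build --gcov                 # instrumented build
+./mfc.sh test                         # exercise code paths
+gcovr -r . --html -o coverage.html    # aggregate coverage data into a report
+```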
+
+### Isolating Issues
+
+When builds fail, isolate the problem by disabling features:
+
+```bash
+# Rule out GPU compiler issues
+./mfc.sh build --no-gpu
+
+# Rule out MPI issues
+./mfc.sh build --no-mpi
+
+# Rule out precision issues
+./mfc.sh build --single   # Single precision
+./mfc.sh build --mixed    # Mixed precision
+```
+
+### Clean Rebuild
+
+Sometimes a clean rebuild fixes issues:
+
+```bash
+./mfc.sh clean
+./mfc.sh build -j $(nproc)
+```
+
+Or manually:
+```bash
+rm -rf build/
+./mfc.sh build -j $(nproc)
+```
+
+---
+
+## Runtime Debugging
+
+### Case Validation
+
+**Always validate your case before running:**
+
+```bash
+./mfc.sh validate case.py
+```
+
+This checks for:
+- Syntax errors in case.py
+- Invalid parameter values
+- Constraint violations (incompatible parameter combinations)
+- Typos in parameter names (with "did you mean?" suggestions)
+
+**Example output:**
+```
+Errors:
+  [red] weno_order=7 but must be one of: [1, 3, 5]
+  [red] Unknown parameter 'weno_ordr' - did you mean 'weno_order'?
+
+Warnings:
+  [yellow] bubbles=True recommends setting: nb, polytropic
+```
+
+For more detailed validation output:
+```bash
+./mfc.sh validate case.py -d
+```
+
+### Verbose Runs
+
+Add verbosity to see what's happening:
+
+```bash
+# Show the exact command being run
+./mfc.sh run case.py -v
+
+# Show job script details
+./mfc.sh run case.py -vv
+```
+
+### Debug Log
+
+Write detailed debug information to a log file:
+
+```bash
+./mfc.sh run case.py -d
+```
+
+This creates a debug log with detailed internal state, useful for reporting issues.
+
+### Running with Fewer Ranks
+
+To isolate parallel issues, try running with fewer MPI ranks:
+
+```bash
+./mfc.sh run case.py -n 1   # Single rank
+./mfc.sh run case.py -n 2   # Two ranks
+```
+
+---
+
+## Test Debugging
+
+### Running Specific Tests
+
+Run only a specific test by UUID:
+
+```bash
+./mfc.sh test --only 3D_sphbubcollapse
+```
+
+Run multiple specific tests:
+```bash
+./mfc.sh test --only 3D_sphbubcollapse 2D_shockbubble
+```
+
+### Serial Test Execution
+
+Run tests serially for clearer output:
+
+```bash
+./mfc.sh test -j 1
+```
+
+### Test Output Location
+
+Test outputs are written to:
+```
+build/tests/<UUID>/
+```
+
+This directory contains:
+- Input files generated from case.py
+- Output files from each stage (pre_process, simulation, post_process)
+- Any error messages or logs
+
+### Regenerating Golden Files
+
+If you intentionally changed physics/numerics, regenerate reference files:
+
+```bash
+# Regenerate for specific tests
+./mfc.sh test --generate --only <UUID>
+
+# Regenerate for a range of tests
+./mfc.sh test --generate --from <UUID> --to <UUID>
+```
+
+### Understanding Test Failures
+
+**"Golden file mismatch"** means numerical results differ from reference values.
+
+Possible causes:
+1. **Intentional changes:** Regenerate golden files with `--generate`
+2. **Compiler differences:** Different compilers produce slightly different results
+3. **Precision settings:** Single vs double precision
+4. **Platform differences:** CPU architecture, GPU differences
+
+---
+
+## Performance Profiling
+
+### NVIDIA GPU Profiling
+
+**Nsight Compute** (kernel-level analysis):
+```bash
+./mfc.sh run case.py --ncu
+# Or with additional ncu flags:
+./mfc.sh run case.py --ncu --set full
+```
+
+**Nsight Systems** (timeline/system analysis):
+```bash
+./mfc.sh run case.py --nsys
+# Or with additional nsys flags:
+./mfc.sh run case.py --nsys -o profile_output
+```
+
+### AMD GPU Profiling
+
+**rocprof-compute** (kernel analysis):
+```bash
+./mfc.sh run case.py --rcu
+```
+
+**rocprof** (system profiling):
+```bash
+./mfc.sh run case.py --rsys
+```
+
+### Performance vs Correctness
+
+If results seem wrong, first verify correctness:
+1. Run `./mfc.sh validate case.py`
+2. Compare against a known working case
+3. Try a `--debug` build for better error detection
+
+If results are correct but slow:
+1. Use the profiling tools above to identify bottlenecks
+2. Try `--case-optimization` for production runs (10x speedup)
+3. Check GPU utilization with `nvidia-smi` or `rocm-smi`
+
+---
+
+## Common Build Errors
+
+### "CMake could not find MPI"
+
+**Cause:** MPI is not installed or not in PATH.
+
+**Fix:**
+- **Ubuntu/Debian:** `sudo apt install libopenmpi-dev openmpi-bin`
+- **macOS (Homebrew):** `brew install open-mpi`
+- **HPC systems:** `module load openmpi` or use `./mfc.sh load`
+
+Verify: `mpirun --version`
+
+### "CMake could not find a Fortran compiler"
+
+**Cause:** No Fortran compiler installed or not in PATH.
+
+**Fix:**
+- **Ubuntu/Debian:** `sudo apt install gfortran`
+- **macOS (Homebrew):** `brew install gcc`
+- **HPC systems:** `module load gcc` or `module load nvhpc`
+
+### "Fypp preprocessing failed"
+
+**Cause:** Syntax error in `.fpp` files or missing Fypp.
+
+**Fix:**
+1. Ensure Fypp is installed: `pip install fypp`
+2. Check the specific error line in the output
+3. Run `./mfc.sh format` to check for formatting issues
+
+### Build fails with GPU/OpenACC errors
+
+**Cause:** GPU compiler not properly configured.
+
+**Fix:**
+1. Verify the GPU compiler is available:
+   - NVIDIA: `nvfortran --version`
+   - AMD: `ftn --version` or `amdflang --version`
+2. Load appropriate modules: `module load nvhpc` or `module load rocm`
+3. Set the CUDA compute capability if needed: `export MFC_CUDA_CC=80`
+4. Try building without GPU first: `./mfc.sh build --no-gpu`
+
+### "Module not found" on HPC
+
+**Cause:** Required modules not loaded.
+
+**Fix:**
+1. Use MFC's module loader if available for your system:
+   ```bash
+   source ./mfc.sh load -c <system> -m <mode>
+   ```
+2. Or manually load modules:
+   ```bash
+   module load gcc openmpi cmake python
+   ```
+
+---
+
+## Common Runtime Errors
+
+### "Case parameter constraint violations"
+
+**Cause:** Invalid combination of simulation parameters.
+
+**Fix:**
+1. Run `./mfc.sh validate case.py` for detailed diagnostics
+2. Check the specific constraint mentioned
+3. Review similar examples in `examples/`
+
+Common issues:
+- Grid dimensions (`m`, `n`, `p`) not matching dimensionality
+- `weno_order` too high for grid size
+- Boundary conditions not matching domain setup
+
+### "NaN detected"
+
+**Cause:** Numerical instability.
+
+**Fix:**
+1. Reduce the time step (`dt`)
+2. Check initial conditions for unphysical values
+3. Verify boundary conditions are appropriate
+4. Try a more diffusive scheme initially
+5. Check the CFL condition
+
+### GPU not detected
+
+**Cause:** GPU drivers or runtime not configured.
+
+**Fix:**
+1. Verify the GPU is visible:
+   - NVIDIA: `nvidia-smi`
+   - AMD: `rocm-smi`
+2. Check that the compiler supports GPU offloading
+3. Ensure the correct modules are loaded
+4. Set `OMP_TARGET_OFFLOAD=MANDATORY` to force GPU use (fails fast if unavailable)
+
+### MPI errors or hangs
+
+**Cause:** MPI configuration issues or deadlocks.
+
+**Fix:**
+1. Verify MPI works: `mpirun -n 2 hostname`
+2. Try with fewer ranks: `./mfc.sh run case.py -n 1`
+3. Check for incompatible MPI/compiler combinations
+4. On clusters, ensure you're using the correct MPI for the interconnect
+
+---
+
+## Getting Help
+
+If you can't resolve an issue:
+
+1. **Search existing issues:** [GitHub Issues](https://github.com/MFlowCode/MFC/issues)
+
+2. **Check documentation:** [MFC Documentation](https://mflowcode.github.io/)
+
+3. **Open a new issue** with:
+   - Your OS and compiler versions
+   - The exact command you ran
+   - Complete error output (use `-vv` for builds)
+   - Your case file (if applicable)
+   - Output of `./mfc.sh validate case.py`
+
+4. **Use the CLI help:**
+   ```bash
+   ./mfc.sh help debugging
+   ./mfc.sh help gpu
+   ./mfc.sh -h
+   ```
diff --git a/docs/documentation/visualization.md b/docs/documentation/visualization.md
index dd9c5a295e..ac40d10b1c 100644
--- a/docs/documentation/visualization.md
+++ b/docs/documentation/visualization.md
@@ -1,7 +1,9 @@
+@page visualization Flow visualization
+
 # Flow visualization
 
 A post-processed database in Silo-HDF5 format can be visualized and analyzed using Paraview and VisIt.
-After the post-processing of simulation data (see section [Running](running.md)), a directory named `silo_hdf5` contains a silo-HDF5 database.
+After the post-processing of simulation data (see section @ref running "Running"), a directory named `silo_hdf5` contains a silo-HDF5 database.
 Here, `silo_hdf5/` includes a directory named `root/` that contains index files for flow field data at each saved time step.
 
 ### Visualizing with Paraview
@@ -38,7 +40,7 @@ For many cases, this step will require resizing the render view window.
 VisIt is an alternative open-source interactive parallel visualization and graphical analysis tool for viewing scientific data.
 Versions of VisIt after 2.6.0 have been confirmed to work with the MFC databases for some parallel environments.
 Nevertheless, installation and configuration of VisIt can be environment-dependent and are left to the user.
-Further remarks on parallel flow visualization, analysis, and processing of the MFC database using VisIt can also be found in [Coralic (2015)](references.md) and [Meng (2016)](references.md).
+Further remarks on parallel flow visualization, analysis, and processing of the MFC database using VisIt can also be found in [Coralic (2015)](@ref references) and [Meng (2016)](@ref references).
 The user can launch VisIt and open the index files under `/silo_hdf5/root`.
 Once the database is loaded, flow field variables contained in the database can be added to the plot.
diff --git a/docs/examples.sh b/docs/examples.sh index 7a2e77df18..42a514eb2d 100755 --- a/docs/examples.sh +++ b/docs/examples.sh @@ -7,7 +7,7 @@ set -e -x examples_md="$1/docs/documentation/examples.md" rm "$examples_md" || true -echo -e "# Example Cases\n" > "$examples_md" +echo -e "@page examples Example Cases\n\n# Example Cases\n" > "$examples_md" for casedir in $(find "$1/examples/" -mindepth 1 -maxdepth 1 -type d); do casename="$(basename "$casedir")" diff --git a/docs/gen_cli_reference.sh b/docs/gen_cli_reference.sh new file mode 100755 index 0000000000..d4ffa37f67 --- /dev/null +++ b/docs/gen_cli_reference.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Generate CLI reference documentation from cli/commands.py + +set -e + +REPO_ROOT="$1" + +if [ -z "$REPO_ROOT" ]; then + echo "Usage: $0 " + exit 1 +fi + +echo "Generating CLI reference documentation..." +cd "$REPO_ROOT/toolchain" +python3 -c " +import sys; sys.path.insert(0, '.') +from mfc.cli.commands import MFC_CLI_SCHEMA +from mfc.cli.docs_gen import generate_cli_reference +print(generate_cli_reference(MFC_CLI_SCHEMA)) +" > "$REPO_ROOT/docs/documentation/cli-reference.md" +echo "Generated docs/documentation/cli-reference.md" diff --git a/docs/gen_parameters.sh b/docs/gen_parameters.sh new file mode 100755 index 0000000000..ad31763910 --- /dev/null +++ b/docs/gen_parameters.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Generate parameters documentation from parameter registry + +set -e + +REPO_ROOT="$1" + +if [ -z "$REPO_ROOT" ]; then + echo "Usage: $0 " + exit 1 +fi + +echo "Generating parameters documentation..." +cd "$REPO_ROOT/toolchain" +python3 -c " +import sys; sys.path.insert(0, '.') +from mfc.params.generators.docs_gen import generate_parameter_docs +print(generate_parameter_docs()) +" > "$REPO_ROOT/docs/documentation/parameters.md" +echo "Generated docs/documentation/parameters.md" diff --git a/docs/index.html b/docs/index.html index 86104146de..f2fca62a1d 100644 --- a/docs/index.html +++ b/docs/index.html @@ -277,15 +277,15 @@ - + Quick Start - + Documentation - + Papers diff --git a/examples/1D_bubblescreen/case.py b/examples/1D_bubblescreen/case.py index 3fe8aaf444..fec53c55e1 100755 --- a/examples/1D_bubblescreen/case.py +++ b/examples/1D_bubblescreen/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import math, json +import math +import json # FLUID PROPERTIES R_uni = 8314.0 # [J/kmol/K] diff --git a/examples/1D_convergence/plot.py b/examples/1D_convergence/plot.py index 60747b6965..6af7e5c1f9 100644 --- a/examples/1D_convergence/plot.py +++ b/examples/1D_convergence/plot.py @@ -18,15 +18,15 @@ exact_a1 = pd.read_csv(f"N{N[i]}_O{Ord[j]}/D/cons.5.00.000000.dat", sep=r"\s+", header=None, names=["x", "y"]) exact_a2 = pd.read_csv(f"N{N[i]}_O{Ord[j]}/D/cons.6.00.000000.dat", sep=r"\s+", header=None, names=["x", "y"]) - ## 2 norm + # 2 norm errors[i, j, 0] = np.linalg.norm(sim_a1.y - exact_a1.y) / np.sqrt(N[i]) errors[i, j, 0] += np.linalg.norm(sim_a2.y - exact_a2.y) / np.sqrt(N[i]) - ## 1 norm + # 1 norm errors[i, j, 1] = 1 / N[i] * np.sum(np.abs(sim_a1.y - exact_a1.y)) errors[i, j, 1] += 1 / N[i] * np.sum(np.abs(sim_a2.y - exact_a2.y)) - ## Inf norm + # Inf norm errors[i, j, 2] = np.max([np.nanmax(np.abs(sim_a1.y - exact_a1.y)), np.nanmax(np.abs(sim_a2.y - exact_a2.y))]) fig, ax = plt.subplots(1, 3, figsize=(12, 8), sharex=True) diff --git a/examples/1D_exp_bubscreen/case.py b/examples/1D_exp_bubscreen/case.py index a7bda307b3..d8590436c6 100755 --- a/examples/1D_exp_bubscreen/case.py +++ b/examples/1D_exp_bubscreen/case.py @@ -114,7 +114,7 
@@ "prim_vars_wrt": "T", "parallel_io": "F", "fd_order": 1, - #'schlieren_wrt' :'T', + # 'schlieren_wrt' :'T', "probe_wrt": "T", "num_probes": 1, "probe(1)%x": 0.0, diff --git a/examples/1D_exp_tube_phasechange/case.py b/examples/1D_exp_tube_phasechange/case.py index 6a0453951e..18bd4c72d9 100644 --- a/examples/1D_exp_tube_phasechange/case.py +++ b/examples/1D_exp_tube_phasechange/case.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 -import math, json, argparse +import math +import json +import argparse parser = argparse.ArgumentParser(prog="phasechange", description="phase change considering both 5 and 6 equation models.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--mfc", type=json.loads, default="{}", metavar="DICT", help="MFC's toolchain's internal state.") diff --git a/examples/1D_reactive_shocktube/case.py b/examples/1D_reactive_shocktube/case.py index d20bc334f4..c9ce96b804 100644 --- a/examples/1D_reactive_shocktube/case.py +++ b/examples/1D_reactive_shocktube/case.py @@ -3,7 +3,8 @@ # + https://doi.org/10.1016/j.ijhydene.2023.03.190: Verification of numerical method # + https://doi.org/10.1016/j.compfluid.2013.10.014: 4.7. Multi-species reactive shock tube -import json, argparse +import json +import argparse import cantera as ct parser = argparse.ArgumentParser(prog="1D_reactive_shocktube", formatter_class=argparse.ArgumentDefaultsHelpFormatter) diff --git a/examples/2D_acoustic_broadband/case.py b/examples/2D_acoustic_broadband/case.py index cd0cbfd033..c3255333ad 100644 --- a/examples/2D_acoustic_broadband/case.py +++ b/examples/2D_acoustic_broadband/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math print( json.dumps( diff --git a/examples/2D_acoustic_support10_axisym/case.py b/examples/2D_acoustic_support10_axisym/case.py index 3cfa57bdc2..37f8dfbf33 100644 --- a/examples/2D_acoustic_support10_axisym/case.py +++ b/examples/2D_acoustic_support10_axisym/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math # Configuring case dictionary print( diff --git a/examples/2D_acoustic_support2/case.py b/examples/2D_acoustic_support2/case.py index 52acd520cf..07ab20336f 100644 --- a/examples/2D_acoustic_support2/case.py +++ b/examples/2D_acoustic_support2/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math # Configuring case dictionary print( diff --git a/examples/2D_acoustic_support9/case.py b/examples/2D_acoustic_support9/case.py index 57317c5263..de7972aad8 100644 --- a/examples/2D_acoustic_support9/case.py +++ b/examples/2D_acoustic_support9/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math # Configuring case dictionary print( diff --git a/examples/2D_axisym_shockwatercavity/case.py b/examples/2D_axisym_shockwatercavity/case.py index 46c9e5eb7f..bded43fd7a 100644 --- a/examples/2D_axisym_shockwatercavity/case.py +++ b/examples/2D_axisym_shockwatercavity/case.py @@ -18,7 +18,7 @@ # the droplet is about D0/8 ISD = 5.0 / 8 * D0 -## pre-shock properties - AIR +# pre-shock properties - AIR # pressure - Pa p0a = patm @@ -35,7 +35,7 @@ # speed of sound - M/s c_a = math.sqrt(gama * (p0a + pia) / rho0a) -## Droplet - WATER +# Droplet - WATER # surface tension - N / m st = 0.00e0 @@ -62,7 +62,7 @@ # Min or psOp0a. 
Just comment/uncomment appropriately Min = 2.146 -## Pos to pre shock ratios - AIR +# Pos to pre shock ratios - AIR # pressure psOp0a = (Min**2 - 1) * 2 * gama / (gama + 1) + 1 @@ -77,7 +77,7 @@ # shock speed of sound - m/s ss = Ms * c_a -## post-shock - AIR +# post-shock - AIR # pressure - Pa ps = psOp0a * p0a @@ -91,7 +91,7 @@ # velocity at the post shock - m/s vel = c_a / gama * (psOp0a - 1.0) * p0a / (p0a + pia) / Ms -## Domain boundaries - m +# Domain boundaries - m # x direction xb = -2.4707 * D0 @@ -135,7 +135,7 @@ # Save Frequency. Note that the number of autosaves will be SF + 1, as th IC (0.dat) is also saved SF = 400 -## making Nt divisible by SF +# making Nt divisible by SF # 1 - ensure NtA goes slightly beyond tendA NtA = int(tendA // dt + 1) diff --git a/examples/2D_ibm_steady_shock/case.py b/examples/2D_ibm_steady_shock/case.py index 86be786af5..9062ea754b 100644 --- a/examples/2D_ibm_steady_shock/case.py +++ b/examples/2D_ibm_steady_shock/case.py @@ -155,7 +155,7 @@ # Surrounding liquid "fluid_pp(1)%gamma": 1.0e00 / (n_tait - 1.0e00), "fluid_pp(1)%pi_inf": n_tait * B_tait / (n_tait - 1.0), - #'fluid_pp(1)%Re(1)' : 67567, + # 'fluid_pp(1)%Re(1)' : 67567, } ) ) diff --git a/examples/2D_phasechange_bubble/case.py b/examples/2D_phasechange_bubble/case.py index 6696b559e1..012afe3ad3 100644 --- a/examples/2D_phasechange_bubble/case.py +++ b/examples/2D_phasechange_bubble/case.py @@ -1,12 +1,14 @@ #!/usr/bin/env python3 -import math, json, argparse +import math +import json +import argparse parser = argparse.ArgumentParser(prog="phasechange", description="phase change considering both 5 and 6 equation models.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--mfc", type=json.loads, default={}, metavar="DICT", help="MFC's toolchain's internal state.") parser.add_argument("-me", "--model_eqns", type=int, metavar="MODEL EQN", choices=[2, 3], default=3, help="choose `2' for 5-equation model or `3' for 6-equation model.") args = parser.parse_args() -## 1 FOR BACKGROUND, 2 FOR BUBBLE +# 1 FOR BACKGROUND, 2 FOR BUBBLE # Pressure [Pa] p01 = 5e6 p02 = 3550 @@ -95,7 +97,7 @@ c_a1 = math.sqrt(gama * (p01 + pia) / rho0a1) c_a2 = math.sqrt(gama * (p02 + pia) / rho0a2) -## SHOCK RELATIONS +# SHOCK RELATIONS p02Op01 = p02 / p01 # Mach number of the shocked region - this should agree with Min, if everything is correct @@ -117,7 +119,7 @@ aa1 = 1.0 - awl1 - awv1 aa2 = 1.0 - awl2 - awv2 -## SIMULATION PARAMETERS +# SIMULATION PARAMETERS # CFL cfl = 0.50 diff --git a/examples/2D_phasechange_bubble/casefile.py b/examples/2D_phasechange_bubble/casefile.py index cb63c525a0..415b0547f9 100644 --- a/examples/2D_phasechange_bubble/casefile.py +++ b/examples/2D_phasechange_bubble/casefile.py @@ -1,12 +1,15 @@ -#!/usr/bin/env python3 -import math, json, argparse +#!/usr/bin/env python3 + +import math +import json +import argparse parser = argparse.ArgumentParser(prog="phasechange", description="phase change considering both 5 and 6 equation models.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--mfc", type=json.loads, default="{}", metavar="DICT", help="MFC's toolchain's internal state.") parser.add_argument("-me", "--model_eqns", type=int, metavar="MODEL EQN", choices=[2, 3], default=3, help="choose `2' for 5-equation model or `3' for 6-equation model.") args = parser.parse_args() -## 1 FOR BACKGROUND, 2 FOR BUBBLE +# 1 FOR BACKGROUND, 2 FOR BUBBLE # Pressure [Pa] p01 = 5e6 p02 = 3550 @@ -96,7 +99,7 @@ c_a1 = math.sqrt(gama * (p01 + pia) / 
rho0a1)
 c_a2 = math.sqrt(gama * (p02 + pia) / rho0a2)
 
-## SHOCK RELATIONS
+# SHOCK RELATIONS
 p02Op01 = p02 / p01
 
 # Mach number of the shocked region - this should agree with Min, if everything is correct
@@ -118,7 +121,7 @@
 aa1 = 1.0 - awl1 - awv1
 aa2 = 1.0 - awl2 - awv2
 
-## SIMULATION PARAMETERS
+# SIMULATION PARAMETERS
 
 # CFL
 cfl = 0.50
diff --git a/examples/2D_riemann_test/case.py b/examples/2D_riemann_test/case.py
index f4f588aefd..8680984f23 100644
--- a/examples/2D_riemann_test/case.py
+++ b/examples/2D_riemann_test/case.py
@@ -32,8 +32,8 @@
 # 'recon_type' : 1,
 "weno_order": 5,
 "weno_eps": 1e-16,
-#'muscl_order' : 2,
-#'muscl_lim' : 1,
+# 'muscl_order' : 2,
+# 'muscl_lim' : 1,
 "riemann_solver": 2,
 "wave_speeds": 1,
 "avg_state": 2,
diff --git a/examples/2D_shearlayer/case.py b/examples/2D_shearlayer/case.py
index 628686c444..40edaf48b2 100644
--- a/examples/2D_shearlayer/case.py
+++ b/examples/2D_shearlayer/case.py
@@ -65,12 +65,12 @@
 "patch_icpp(1)%geometry": 4,
 "patch_icpp(1)%x_centroid": 0.00000000000000e00,
 "patch_icpp(1)%y_centroid": 0.00000000000000e00,
-#'patch_icpp(1)%length_x' : 1.00000000000000E+00,
-#'patch_icpp(1)%length_y' : 0.50000000000000E+00,
+# 'patch_icpp(1)%length_x' : 1.00000000000000E+00,
+# 'patch_icpp(1)%length_y' : 0.50000000000000E+00,
 "patch_icpp(1)%normal(1)": 0.00624987793326e00,
 "patch_icpp(1)%normal(2)": -0.99998046932219e00,
-#'patch_icpp(1)%smooth_patch_id': 1,
-#'patch_icpp(1)%smooth_coeff' : 1.00000000000000E+00,
+# 'patch_icpp(1)%smooth_patch_id': 1,
+# 'patch_icpp(1)%smooth_coeff' : 1.00000000000000E+00,
 "patch_icpp(1)%vel(1)": -myv,
 "patch_icpp(1)%vel(2)": 0.00000000000000e00,
 "patch_icpp(1)%pres": 1.01325000000000e05,
diff --git a/examples/2D_shocktube_phasechange/case.py b/examples/2D_shocktube_phasechange/case.py
index 6bab1a5c6b..48277019a8 100644
--- a/examples/2D_shocktube_phasechange/case.py
+++ b/examples/2D_shocktube_phasechange/case.py
@@ -1,20 +1,22 @@
 #!/usr/bin/env python3
-import math, json, argparse
+import math
+import json
+import argparse
 
 parser = argparse.ArgumentParser(prog="phasechange", description="phase change considering both 5 and 6 equation models.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 parser.add_argument("--mfc", type=json.loads, default="{}", metavar="DICT", help="MFC's toolchain's internal state.")
 parser.add_argument("-me", "--model_eqns", type=int, metavar="MODEL EQN", choices=[2, 3], default=3, help="choose `2' for 5-equation model or `3' for 6-equation model.")
 args = parser.parse_args()
 
-## 1 FOR BACKGROUND, 2 FOR SHOKED STATE, 3 FOR WATER REGION (WHEN NEEDED)
+# 1 FOR BACKGROUND, 2 FOR SHOCKED STATE, 3 FOR WATER REGION (WHEN NEEDED)
 # Pressure
 p01 = 1.0843e05
 p02 = 2.1114e05
 p03 = 1.0685e05
 
-## FLUID PROPERTIES FOR EACH PATCH
+# FLUID PROPERTIES FOR EACH PATCH
 
-## liquid water
+# liquid water
 # pi infty
 piwl = 1.0e09
@@ -43,7 +45,7 @@
 c_wl1 = math.sqrt(gamwl * (p01 + piwl) / rho0wl1)
 c_wl2 = math.sqrt(gamwl * (p02 + piwl) / rho0wl2)
 
-## Vapor water
+# Vapor water
 # pi infinity
 piwv = 0
@@ -101,7 +103,7 @@
 c_a1 = math.sqrt(gama * (p01 + pia) / rho0a1)
 c_a2 = math.sqrt(gama * (p02 + pia) / rho0a2)
 
-## SHOCK RELATIONS
+# SHOCK RELATIONS
 p02Op01 = p02 / p01
 
 # Mach number of the shocked region - this should agree with Min, if everything is correct
@@ -110,7 +112,7 @@
 # shock speed
 ss = Ms * c_a1
 
-## SIMULATION PARAMETERS
+# SIMULATION PARAMETERS
 
 # CFL
 cfl = 0.25
diff --git a/examples/3D_acoustic_support11/case.py b/examples/3D_acoustic_support11/case.py
index ed2e243fed..19625bbce6
100644 --- a/examples/3D_acoustic_support11/case.py +++ b/examples/3D_acoustic_support11/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math # Configuring case dictionary print( diff --git a/examples/3D_acoustic_support3/case.py b/examples/3D_acoustic_support3/case.py index 1aa0e84d16..5548ce8399 100644 --- a/examples/3D_acoustic_support3/case.py +++ b/examples/3D_acoustic_support3/case.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import json, math +import json +import math # Configuring case dictionary print( diff --git a/examples/3D_phasechange_bubble/case.py b/examples/3D_phasechange_bubble/case.py index 4df061b38d..4e3c5f446f 100644 --- a/examples/3D_phasechange_bubble/case.py +++ b/examples/3D_phasechange_bubble/case.py @@ -1,12 +1,14 @@ #!/usr/bin/env python3 -import math, json, argparse +import math +import json +import argparse parser = argparse.ArgumentParser(prog="phasechange", description="phase change considering both 5 and 6 equation models.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--mfc", type=json.loads, default="{}", metavar="DICT", help="MFC's toolchain's internal state.") parser.add_argument("-me", "--model_eqns", type=int, metavar="MODEL EQN", choices=[2, 3], default=3, help="choose `2' for 5-equation model or `3' for 6-equation model.") args = parser.parse_args() -## 1 FOR BACKGROUND, 2 FOR BUBBLE +# 1 FOR BACKGROUND, 2 FOR BUBBLE # Pressure [Pa] p01 = 5e6 p02 = 3550 @@ -95,7 +97,7 @@ c_a1 = math.sqrt(gama * (p01 + pia) / rho0a1) c_a2 = math.sqrt(gama * (p02 + pia) / rho0a2) -## SHOCK RELATIONS +# SHOCK RELATIONS p02Op01 = p02 / p01 # Mach number of the shocked region - this should agree with Min, if everything is correct @@ -117,7 +119,7 @@ aa1 = 1.0 - awl1 - awv1 aa2 = 1.0 - awl2 - awv2 -## SIMULATION PARAMETERS +# SIMULATION PARAMETERS # CFL cfl = 0.50 diff --git a/examples/3D_recovering_sphere/case.py b/examples/3D_recovering_sphere/case.py index 2960eabcc6..c16ac81b55 100644 --- a/examples/3D_recovering_sphere/case.py +++ b/examples/3D_recovering_sphere/case.py @@ -44,8 +44,8 @@ "t_step_start": 400000, "t_step_stop": 1000000, "t_step_save": 2000, - #'t_step_stop' : 100, - #'t_step_save' : 100, + # 't_step_stop' : 100, + # 't_step_save' : 100, # Simulation Algorithm "model_eqns": 3, "alt_soundspeed": "F", diff --git a/examples/3D_shockdroplet/case.py b/examples/3D_shockdroplet/case.py index b8ba2c2f46..e935861cc1 100644 --- a/examples/3D_shockdroplet/case.py +++ b/examples/3D_shockdroplet/case.py @@ -18,7 +18,7 @@ # the droplet is about D0/8 ISD = 5.0 / 8 * D0 -## pre-shock properties - AIR +# pre-shock properties - AIR # pressure - Pa p0a = patm @@ -35,7 +35,7 @@ # speed of sound - M/s c_a = math.sqrt(gama * (p0a + pia) / rho0a) -## Droplet - WATER +# Droplet - WATER # surface tension - N / m st = 0.00e0 @@ -62,7 +62,7 @@ # Min or psOp0a. Just comment/uncomment appropriately Min = 2.4 -## Pos to pre shock ratios - AIR +# Pos to pre shock ratios - AIR # pressure psOp0a = (Min**2 - 1) * 2 * gama / (gama + 1) + 1 @@ -77,7 +77,7 @@ # shock speed of sound - m/s ss = Ms * c_a -## post-shock - AIR +# post-shock - AIR # pressure - Pa ps = psOp0a * p0a @@ -91,7 +91,7 @@ # velocity at the post shock - m/s vel = c_a / gama * (psOp0a - 1.0) * p0a / (p0a + pia) / Ms -## Domain boundaries - m +# Domain boundaries - m # x direction xb = -8.4707 * D0 @@ -145,7 +145,7 @@ # Save Frequency. 
Note that the number of autosaves will be SF + 1, as th IC (0.dat) is also saved SF = 400 -## making Nt divisible by SF +# making Nt divisible by SF # 1 - ensure NtA goes slightly beyond tendA NtA = int(tendA // dt + 1) diff --git a/examples/3D_shockdroplet_muscl/case.py b/examples/3D_shockdroplet_muscl/case.py index e8c4069caf..8d4337b0da 100644 --- a/examples/3D_shockdroplet_muscl/case.py +++ b/examples/3D_shockdroplet_muscl/case.py @@ -18,7 +18,7 @@ # the droplet is about D0/8 ISD = 5.0 / 8 * D0 -## pre-shock properties - AIR +# pre-shock properties - AIR # pressure - Pa p0a = patm @@ -35,7 +35,7 @@ # speed of sound - M/s c_a = math.sqrt(gama * (p0a + pia) / rho0a) -## Droplet - WATER +# Droplet - WATER # surface tension - N / m st = 0.00e0 @@ -62,7 +62,7 @@ # Min or psOp0a. Just comment/uncomment appropriately Min = 2.4 -## Pos to pre shock ratios - AIR +# Pos to pre shock ratios - AIR # pressure psOp0a = (Min**2 - 1) * 2 * gama / (gama + 1) + 1 @@ -77,7 +77,7 @@ # shock speed of sound - m/s ss = Ms * c_a -## post-shock - AIR +# post-shock - AIR # pressure - Pa ps = psOp0a * p0a @@ -91,7 +91,7 @@ # velocity at the post shock - m/s vel = c_a / gama * (psOp0a - 1.0) * p0a / (p0a + pia) / Ms -## Domain boundaries - m +# Domain boundaries - m # x direction xb = -8.4707 * D0 @@ -145,7 +145,7 @@ # Save Frequency. Note that the number of autosaves will be SF + 1, as th IC (0.dat) is also saved SF = 400 -## making Nt divisible by SF +# making Nt divisible by SF # 1 - ensure NtA goes slightly beyond tendA NtA = int(tendA // dt + 1) diff --git a/examples/nD_perfect_reactor/case.py b/examples/nD_perfect_reactor/case.py index 9d1e61fcba..680978dfbc 100644 --- a/examples/nD_perfect_reactor/case.py +++ b/examples/nD_perfect_reactor/case.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 # Reference: # + https://doi.org/10.1063/1.1696266 -import json, argparse +import json +import argparse import cantera as ct from mfc.case_utils import * diff --git a/examples/scaling/analyze.py b/examples/scaling/analyze.py index ab6f8067bd..0f358417c3 100644 --- a/examples/scaling/analyze.py +++ b/examples/scaling/analyze.py @@ -1,4 +1,5 @@ -import os, re +import os +import re import pandas as pd from io import StringIO diff --git a/examples/scaling/case.py b/examples/scaling/case.py index 4276b53f38..3efc5615df 100644 --- a/examples/scaling/case.py +++ b/examples/scaling/case.py @@ -1,5 +1,9 @@ #!/usr/bin/env python3 -import sys, json, math, typing, argparse +import sys +import json +import math +import typing +import argparse parser = argparse.ArgumentParser( prog="scaling_and_perf", @@ -104,14 +108,14 @@ def nxyz_from_ncells_strong(ncells: float) -> typing.Tuple[int, int, int]: # the droplet is about D0/8 ISD = 5.0 / 8 * D0 -## pre-shock properties - AIR +# pre-shock properties - AIR p0a = patm # pressure - Pa rho0a = 1.204 # density - kg/m3 gama = 1.40 # gamma pia = 0 # pi infinity - Pa c_a = math.sqrt(gama * (p0a + pia) / rho0a) # speed of sound - M/s -## Droplet - WATER +# Droplet - WATER rho0w = 1000 # density - kg/m3 p0w = patm # pressure - Pa gamw = 6.12 # gamma diff --git a/examples/scaling/export.py b/examples/scaling/export.py index c908fab024..3208cf8541 100644 --- a/examples/scaling/export.py +++ b/examples/scaling/export.py @@ -1,4 +1,8 @@ -import re, os, csv, glob, statistics +import re +import os +import csv +import glob +import statistics from dataclasses import dataclass, fields CDIR = os.path.abspath(os.path.join("examples", "scaling")) diff --git a/mfc.sh b/mfc.sh index 9fb95ae61a..843ea4dc19 100755 --- 
a/mfc.sh +++ b/mfc.sh @@ -10,6 +10,9 @@ fi # Load utility script . "$(pwd)/toolchain/util.sh" +# Print startup message immediately for user feedback +log "Starting..." + # Handle upgrading from older MFC build systems if [ -d "$(pwd)/bootstrap" ] || [ -d "$(pwd)/dependencies" ] || [ -f "$(pwd)/build/mfc.lock.yaml" ]; then error "Please remove, if applicable, the following directories:" @@ -22,21 +25,34 @@ if [ -d "$(pwd)/bootstrap" ] || [ -d "$(pwd)/dependencies" ] || [ -f "$(pwd)/bui fi # If the user wishes to run the "load" script -if [ "$1" '==' 'load' ]; then +if [ "$1" '==' 'load' ] && [ "$2" != "--help" ] && [ "$2" != "-h" ]; then + # Check if the script is being sourced (required for load to work) + if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + echo "" + echo "mfc: ERROR > The 'load' command must be run with 'source' to set environment variables." + echo "" + echo " Instead of: ./mfc.sh load ..." + echo " Please use: source ./mfc.sh load ..." + echo " or: . ./mfc.sh load ..." + echo "" + echo " Example: source ./mfc.sh load -c p -m g" + echo "" + exit 1 + fi shift; . "$(pwd)/toolchain/bootstrap/modules.sh" $@; return -elif [ "$1" '==' "lint" ]; then +elif [ "$1" '==' "lint" ] && [ "$2" != "--help" ] && [ "$2" != "-h" ]; then . "$(pwd)/toolchain/bootstrap/python.sh" shift; . "$(pwd)/toolchain/bootstrap/lint.sh" $@; exit 0 -elif [ "$1" '==' "format" ]; then +elif [ "$1" '==' "format" ] && [ "$2" != "--help" ] && [ "$2" != "-h" ]; then . "$(pwd)/toolchain/bootstrap/python.sh" shift; . "$(pwd)/toolchain/bootstrap/format.sh" $@; exit 0 elif [ "$1" '==' "venv" ]; then shift; . "$(pwd)/toolchain/bootstrap/python.sh" $@; return -elif [ "$1" '==' "clean" ]; then +elif [ "$1" '==' "clean" ] && [ "$2" != "--help" ] && [ "$2" != "-h" ]; then rm -rf "$(pwd)/build"; exit 0 -elif [ "$1" '==' "spelling" ]; then +elif [ "$1" '==' "spelling" ] && [ "$2" != "--help" ] && [ "$2" != "-h" ]; then . "$(pwd)/toolchain/bootstrap/python.sh" shift; . "$(pwd)/toolchain/bootstrap/spelling.sh" $@; exit 0 @@ -47,6 +63,7 @@ mkdir -p "$(pwd)/build" . "$(pwd)/toolchain/bootstrap/cmake.sh" . "$(pwd)/toolchain/bootstrap/python.sh" +# init command: just bootstrap the environment and exit (no Python command) if [ "$1" '==' 'init' ]; then exit 0 fi diff --git a/toolchain/bootstrap/format.sh b/toolchain/bootstrap/format.sh index 9467c68aa8..8dc3841036 100644 --- a/toolchain/bootstrap/format.sh +++ b/toolchain/bootstrap/format.sh @@ -2,16 +2,26 @@ # Function to display help message show_help() { - echo "Usage: $(basename "$0") [OPTIONS]" - echo "This function formats all code in and below the current execution directory." + echo "Usage: $(basename "$0") [OPTIONS] [PATHS...]" + echo "Format code in the MFC repository." echo "" echo "Options:" echo " -h, --help Display this help message and exit." - echo " -j, --jobs JOBS Runs JOBS number of jobs." + echo " -j, --jobs JOBS Runs JOBS number of parallel jobs." + echo "" + echo "Arguments:" + echo " PATHS Paths to format. Defaults to src/, examples/, benchmarks/" + echo "" + echo "Examples:" + echo " ./mfc.sh format Format all code" + echo " ./mfc.sh format -j 8 Format with 8 parallel jobs" + echo " ./mfc.sh format examples/1D_sodshocktube Format a specific directory" echo "" exit 0 } +PATHS=() + while [[ $# -gt 0 ]]; do case "$1" in -j|--jobs) @@ -22,8 +32,8 @@ while [[ $# -gt 0 ]]; do show_help ;; *) - echo "Format, unknown argument: $1." - exit 1 + # Collect positional arguments as paths + PATHS+=("$1") ;; esac @@ -32,22 +42,46 @@ done log "Formatting MFC:" -if ! 
find ${@:-src} -type f | grep -Ev 'autogen' | grep -E '\.(f90|fpp)$' \ - | xargs -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_file.sh; then - error "Formatting MFC source failed." - exit 1 -fi +if [[ ${#PATHS[@]} -gt 0 ]]; then + # Custom paths provided - format all file types in those paths + SEARCH_PATHS="${PATHS[@]}" -if ! find ${@:-examples} -type f | grep -E '\.(py)$' \ - | xargs -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_python.sh; then - error "Formatting MFC examples failed." - exit 1 -fi + # Format Fortran files (.f90, .fpp) + if ! find $SEARCH_PATHS -type f 2>/dev/null | grep -Ev 'autogen' | grep -E '\.(f90|fpp)$' \ + | xargs --no-run-if-empty -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_file.sh; then + error "Formatting Fortran files failed." + exit 1 + fi + + # Format Python files + if ! find $SEARCH_PATHS -type f 2>/dev/null | grep -E '\.(py)$' \ + | xargs --no-run-if-empty -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_python.sh; then + error "Formatting Python files failed." + exit 1 + fi +else + # Default: format src/, examples/, and benchmarks/ + + # Format Fortran files (.f90, .fpp) in src/ + if ! find src -type f 2>/dev/null | grep -Ev 'autogen' | grep -E '\.(f90|fpp)$' \ + | xargs --no-run-if-empty -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_file.sh; then + error "Formatting MFC source failed." + exit 1 + fi + + # Format Python files in examples/ + if ! find examples -type f 2>/dev/null | grep -E '\.(py)$' \ + | xargs --no-run-if-empty -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_python.sh; then + error "Formatting MFC examples failed." + exit 1 + fi -if ! find ${@:-benchmarks} -type f | grep -E '\.(py)$' \ - | xargs -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_python.sh; then - error "Formatting MFC examples failed." - exit 1 + # Format Python files in benchmarks/ + if ! find benchmarks -type f 2>/dev/null | grep -E '\.(py)$' \ + | xargs --no-run-if-empty -L 1 -P ${JOBS:-1} $SHELL toolchain/bootstrap/format_python.sh; then + error "Formatting MFC benchmarks failed." + exit 1 + fi fi ok "Done. MFC has been formatted." diff --git a/toolchain/bootstrap/format_python.sh b/toolchain/bootstrap/format_python.sh index c00213394e..3d95c7aadc 100644 --- a/toolchain/bootstrap/format_python.sh +++ b/toolchain/bootstrap/format_python.sh @@ -4,5 +4,10 @@ echo "> $1" -black -l 200 -x "$1" &>/dev/null 2>&1 +# Use autopep8 for Python formatting +# (black has issues with Python 3.12.5 which is common on HPC systems) +if ! autopep8 --in-place --max-line-length 200 "$1" 2>&1; then + error "Failed to format $1 with autopep8" + exit 1 +fi diff --git a/toolchain/bootstrap/lint.sh b/toolchain/bootstrap/lint.sh index 0c3d2a1fd5..2a28bd18ef 100644 --- a/toolchain/bootstrap/lint.sh +++ b/toolchain/bootstrap/lint.sh @@ -2,9 +2,19 @@ set -e set -o pipefail +# Parse arguments +RUN_TESTS=true +for arg in "$@"; do + case $arg in + --no-test) + RUN_TESTS=false + ;; + esac +done + log "(venv) Running$MAGENTA pylint$COLOR_RESET on$MAGENTA MFC$COLOR_RESET's $MAGENTA""toolchain$COLOR_RESET." -pylint -d R1722,W0718,C0301,C0116,C0115,C0114,C0410,W0622,W0640,C0103,W1309,C0411,W1514,R0401,W0511,C0321,C3001 "$(pwd)/toolchain/" +pylint -d R1722,W0718,C0301,C0116,C0115,C0114,C0410,W0622,W0640,C0103,W1309,C0411,W1514,R0401,W0511,C0321,C3001,R0801,R0911,R0912 "$(pwd)/toolchain/" log "(venv) Running$MAGENTA pylint$COLOR_RESET on$MAGENTA MFC$COLOR_RESET's $MAGENTA""examples$COLOR_RESET." 
@@ -14,4 +24,15 @@ log "(venv) Running$MAGENTA pylint$COLOR_RESET on$MAGENTA MFC$COLOR_RESET's $MAG pylint -d C0103,C0114,C0301,R0801,C0410,W0611,W1514,E0401,C0115,C0116,C0200,W1309,W0401,E0602,R1720,W0614,E1101 $(pwd)/benchmarks/*/case.py -exit $? +# Run toolchain unit tests unless --no-test is specified +if [ "$RUN_TESTS" = true ]; then + log "(venv) Running$MAGENTA unit tests$COLOR_RESET on$MAGENTA MFC$COLOR_RESET's $MAGENTA""toolchain$COLOR_RESET." + + # Run tests as modules from the toolchain directory to resolve relative imports + cd "$(pwd)/toolchain" + python3 -m unittest mfc.params_tests.test_registry mfc.params_tests.test_definitions mfc.params_tests.test_validate mfc.params_tests.test_integration -v + python3 -m unittest mfc.cli.test_cli -v + cd - > /dev/null +fi + +exit 0 diff --git a/toolchain/bootstrap/python.sh b/toolchain/bootstrap/python.sh index dc6f0c1de9..d2c087a26c 100644 --- a/toolchain/bootstrap/python.sh +++ b/toolchain/bootstrap/python.sh @@ -129,7 +129,12 @@ ok "(venv) Entered the $MAGENTA$(python3 --version)$COLOR_RESET virtual environm # (or) # - The pyproject.toml file has changed if ! cmp "$(pwd)/toolchain/pyproject.toml" "$(pwd)/build/pyproject.toml" > /dev/null 2>&1; then - log "(venv) (Re)Installing mfc.sh's Python dependencies (via Pip)." + # Check if this is a fresh install (no previous pyproject.toml in build/) + if [ ! -f "$(pwd)/build/pyproject.toml" ]; then + log "(venv) Installing$MAGENTA Python packages$COLOR_RESET..." + else + log "(venv) Updating Python dependencies..." + fi next_arg=0 nthreads=1 @@ -145,17 +150,243 @@ if ! cmp "$(pwd)/toolchain/pyproject.toml" "$(pwd)/build/pyproject.toml" > /dev/ fi done - if ! PIP_DISABLE_PIP_VERSION_CHECK=1 MAKEFLAGS=$nthreads pip3 install "$(pwd)/toolchain"; then - error "(venv) Installation failed." + # Run package installer and show progress + PIP_LOG="$(pwd)/build/.pip_install.log" + + # Bootstrap uv if not available (uv is 10-100x faster than pip) + # Installing uv itself is quick (~2-3 seconds) and pays off immediately + if ! command -v uv > /dev/null 2>&1; then + log "(venv) Installing$MAGENTA uv$COLOR_RESET package manager for fast installation..." + if PIP_DISABLE_PIP_VERSION_CHECK=1 pip3 install uv > "$PIP_LOG" 2>&1; then + ok "(venv) Installed$MAGENTA uv$COLOR_RESET." + else + # uv install failed, fall back to pip for everything + warn "(venv) Could not install uv, falling back to pip (slower)." + fi + fi + + # Now check if uv is available (either was already installed or we just installed it) + USE_UV=0 + if command -v uv > /dev/null 2>&1; then + USE_UV=1 + fi + + # Use uv if available, otherwise fall back to pip + if [ "$USE_UV" = "1" ]; then + # uv is much faster and has its own progress display - show it + # UV_LINK_MODE=copy avoids slow hardlink failures on cross-filesystem installs (common on HPC) + export UV_LINK_MODE=copy + log "(venv) Using$MAGENTA uv$COLOR_RESET for fast installation..." + if [ -t 1 ]; then + # Interactive terminal: show uv's native progress + if uv pip install "$(pwd)/toolchain"; then + ok "(venv) Installation succeeded." + cp "$(pwd)/toolchain/pyproject.toml" "$(pwd)/build/" + else + error "(venv) Installation failed." + log "(venv) Exiting the$MAGENTA Python$COLOR_RESET virtual environment." + deactivate + exit 1 + fi + else + # Non-interactive: capture output for logging + if uv pip install "$(pwd)/toolchain" > "$PIP_LOG" 2>&1; then + rm -f "$PIP_LOG" + ok "(venv) Installation succeeded." 
+ cp "$(pwd)/toolchain/pyproject.toml" "$(pwd)/build/" + else + error "(venv) Installation failed. See output below:" + echo "" + cat "$PIP_LOG" + echo "" + log "(venv) Exiting the$MAGENTA Python$COLOR_RESET virtual environment." + deactivate + rm -f "$PIP_LOG" + exit 1 + fi + fi + else + # Fall back to pip (slower, show progress bar) + PIP_DISABLE_PIP_VERSION_CHECK=1 MAKEFLAGS=$nthreads pip3 install "$(pwd)/toolchain" > "$PIP_LOG" 2>&1 & + PIP_PID=$! + fi + + # Only run progress bar for pip (uv handles its own output and already completed above) + if [ "$USE_UV" = "0" ]; then + + # Check if we're in an interactive terminal + if [ -t 1 ]; then + IS_TTY=1 + else + IS_TTY=0 + fi + + # Progress bar configuration + # Two phases: Collecting (60%) and Installing (40%) + TOTAL_PKGS=70 # Initial estimate, adjusts dynamically + BAR_WIDTH=30 + LAST_MILESTONE=0 + LAST_PHASE="" + START_TIME=$SECONDS + FIRST_PRINT=1 + + while kill -0 $PIP_PID 2>/dev/null; do + # Determine current phase and count from log + PHASE="resolving" + COUNT=0 + BUILD_COUNT=0 + CURRENT_PKG="" + + if [ -f "$PIP_LOG" ]; then + # Count packages being collected (dependency resolution) + COUNT=$(grep -c "^Collecting" "$PIP_LOG" 2>/dev/null | tr -d '[:space:]') + COUNT=${COUNT:-0} + if ! [[ "$COUNT" =~ ^[0-9]+$ ]]; then + COUNT=0 + fi + + # Count wheels being built + BUILD_COUNT=$(grep -c "Building wheel" "$PIP_LOG" 2>/dev/null | tr -d '[:space:]') + BUILD_COUNT=${BUILD_COUNT:-0} + if ! [[ "$BUILD_COUNT" =~ ^[0-9]+$ ]]; then + BUILD_COUNT=0 + fi + + # Check if we're in the installing phase + if grep -q "Installing collected packages" "$PIP_LOG" 2>/dev/null; then + PHASE="installing" + elif [ "$BUILD_COUNT" -gt 0 ]; then + PHASE="building" + fi + + # Extract the current package being processed + CURRENT_LINE=$(grep -E "^Collecting |^ Downloading |^ Building wheel for " "$PIP_LOG" 2>/dev/null | tail -1) + if [[ "$CURRENT_LINE" == Collecting* ]]; then + # "Collecting numpy>=1.21.0" -> "numpy" + CURRENT_PKG=$(echo "$CURRENT_LINE" | sed 's/^Collecting //; s/[<>=\[( ].*//; s/\[.*//') + elif [[ "$CURRENT_LINE" == *Downloading* ]]; then + # " Downloading numpy-1.24.0-cp312..." -> "numpy" + CURRENT_PKG=$(echo "$CURRENT_LINE" | sed 's/.*Downloading //; s/-[0-9].*//') + elif [[ "$CURRENT_LINE" == *"Building wheel"* ]]; then + # " Building wheel for numpy (pyproject.toml)" -> "numpy" + CURRENT_PKG=$(echo "$CURRENT_LINE" | sed 's/.*Building wheel for //; s/ .*//') + fi + fi + + ELAPSED=$((SECONDS - START_TIME)) + + if [ "$IS_TTY" = "1" ]; then + # Calculate progress based on phase + # Phase 1 (0-60%): Collecting dependencies + # Phase 2 (60-80%): Building wheels + # Phase 3 (80-100%): Installing + + if [ "$COUNT" -gt "$TOTAL_PKGS" ]; then + TOTAL_PKGS=$COUNT + fi + + if [ "$PHASE" = "installing" ]; then + PERCENT=90 + STATUS="Installing..." 
+ elif [ "$PHASE" = "building" ]; then + # During building, progress from 60-80% + PERCENT=$((60 + BUILD_COUNT * 2)) + if [ "$PERCENT" -gt 80 ]; then + PERCENT=80 + fi + STATUS="Building ($BUILD_COUNT wheels)" + else + # During collecting, progress from 0-60% + PERCENT=$((COUNT * 60 / TOTAL_PKGS)) + STATUS="$COUNT packages" + fi + + FILLED=$((PERCENT * BAR_WIDTH / 100)) + EMPTY=$((BAR_WIDTH - FILLED)) + + # Build the bar with Unicode blocks + BAR="" + for ((i=0; i= 2 and args_list[1] in ("-h", "--help"): + # ./mfc.sh --help -> show enhanced help + print_help() + sys.exit(0) + + if len(args_list) >= 3 and args_list[2] in ("-h", "--help"): + # ./mfc.sh --help -> show enhanced command help + command = args_list[1] + # Resolve alias + command = COMMAND_ALIASES.get(command, command) + # Print enhanced help, then let argparse show its help too + print_command_help(command, show_argparse=True) + # Return command so argparse can show its help + return command + return None -from .run.run import get_baked_templates -from .build import TARGETS, DEFAULT_TARGETS -from .common import MFCException, format_list_to_string -from .test.cases import list_cases -from .state import gpuConfigOptions, MFCConfig # pylint: disable=too-many-locals, too-many-branches, too-many-statements def parse(config: MFCConfig): - parser = argparse.ArgumentParser( - prog="./mfc.sh", - description="""\ -Welcome to the MFC master script. This tool automates and manages building, testing, \ -running, and cleaning of MFC in various configurations on all supported platforms. \ -The README documents this tool and its various commands in more detail. To get \ -started, run ./mfc.sh build -h.""", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - - # Here are all of the parser arguments that call functions in other python files - parsers = parser.add_subparsers(dest="command") - run = parsers.add_parser(name="run", help="Run a case with MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - test = parsers.add_parser(name="test", help="Run MFC's test suite.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - build = parsers.add_parser(name="build", help="Build MFC and its dependencies.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - clean = parsers.add_parser(name="clean", help="Clean build artifacts.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - bench = parsers.add_parser(name="bench", help="Benchmark MFC (for CI).", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - bench_diff = parsers.add_parser(name="bench_diff", help="Compare MFC Benchmarks (for CI).", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - count = parsers.add_parser(name="count", help="Count LOC in MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - count_diff = parsers.add_parser(name="count_diff", help="Count LOC in MFC.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - packer = parsers.add_parser(name="packer", help="Packer utility (pack/unpack/compare).", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - # These parser arguments all call BASH scripts, and they only exist so that they show up in the help message - parsers.add_parser(name="load", help="Loads the MFC environment with source.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parsers.add_parser(name="lint", help="Lints all code after editing.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parsers.add_parser(name="format", help="Formats all code after editing.", 
formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parsers.add_parser(name="spelling", help="Runs the spell checker after editing.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - packers = packer.add_subparsers(dest="packer") - pack = packers.add_parser(name="pack", help="Pack a case into a single file.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - pack.add_argument("input", metavar="INPUT", type=str, default="", help="Input file of case to pack.") - pack.add_argument("-o", "--output", metavar="OUTPUT", type=str, default=None, help="Base name of output file.") - - compare = packers.add_parser(name="compare", help="Compare two cases.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - compare.add_argument("input1", metavar="INPUT1", type=str, default=None, help="First pack file.") - compare.add_argument("input2", metavar="INPUT2", type=str, default=None, help="Second pack file.") - compare.add_argument("-rel", "--reltol", metavar="RELTOL", type=float, default=1e-12, help="Relative tolerance.") - compare.add_argument("-abs", "--abstol", metavar="ABSTOL", type=float, default=1e-12, help="Absolute tolerance.") - - def add_common_arguments(p: argparse.ArgumentParser, mask = None): - if mask is None: - mask = "" - - if "t" not in mask: - p.add_argument("-t", "--targets", metavar="TARGET", nargs="+", type=str.lower, choices=[ _.name for _ in TARGETS ], - default=[ _.name for _ in sorted(DEFAULT_TARGETS, key=lambda t: t.runOrder) ], - help=f"Space separated list of targets to act upon. Allowed values are: {format_list_to_string([ _.name for _ in TARGETS ])}.") - - if "m" not in mask: - for f in dataclasses.fields(config): - if f.name == 'gpu': - p.add_argument(f"--{f.name}", action="store", nargs='?', const= gpuConfigOptions.ACC.value,default=gpuConfigOptions.NONE.value, dest=f.name, choices=[e.value for e in gpuConfigOptions], help=f"Turn the {f.name} option to OpenACC or OpenMP.") - p.add_argument(f"--no-{f.name}", action="store_const", const = gpuConfigOptions.NONE.value, dest=f.name, help=f"Turn the {f.name} option OFF.") - continue - p.add_argument( f"--{f.name}", action="store_true", help=f"Turn the {f.name} option ON.") - p.add_argument(f"--no-{f.name}", action="store_false", dest=f.name, help=f"Turn the {f.name} option OFF.") - - p.set_defaults(**{ f.name: getattr(config, f.name) for f in dataclasses.fields(config) }) - - if "j" not in mask: - p.add_argument("-j", "--jobs", metavar="JOBS", type=int, default=1, help="Allows for JOBS concurrent jobs.") - - if "v" not in mask: - p.add_argument("-v", "--verbose", action="store_true", help="Enables verbose compiler & linker output.") - - if "g" not in mask: - p.add_argument("-g", "--gpus", nargs="+", type=int, default=None, help="(Optional GPU override) List of GPU #s to use (environment default if unspecified).") - - # BUILD - add_common_arguments(build, "g") - build.add_argument("-i", "--input", type=str, default=None, help="(GPU Optimization) Build a version of MFC optimized for a case.") - build.add_argument("--case-optimization", action="store_true", default=False, help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded (requires --input).") - - # TEST - test_cases = list_cases() - - add_common_arguments(test, "t") - test.add_argument("-l", "--list", action="store_true", help="List all available tests.") - test.add_argument("-f", "--from", default=test_cases[0].get_uuid(), type=str, help="First test UUID to run.") - test.add_argument("-t", "--to", 
default=test_cases[-1].get_uuid(), type=str, help="Last test UUID to run.") - test.add_argument("-o", "--only", nargs="+", type=str, default=[], metavar="L", help="Only run tests with specified properties.") - test.add_argument("-a", "--test-all", action="store_true", default=False, help="Run the Post Process Tests too.") - test.add_argument("-%", "--percent", type=int, default=100, help="Percentage of tests to run.") - test.add_argument("-m", "--max-attempts", type=int, default=1, help="Maximum number of attempts to run a test.") - test.add_argument( "--rdma-mpi", action="store_true", default=False, help="Run tests with RDMA MPI enabled") - test.add_argument( "--no-build", action="store_true", default=False, help="(Testing) Do not rebuild MFC.") - test.add_argument( "--no-examples", action="store_true", default=False, help="Do not test example cases." ) - test.add_argument("--case-optimization", action="store_true", default=False, help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded.") - test.add_argument( "--dry-run", action="store_true", default=False, help="Build and generate case files but do not run tests.") - - test_meg = test.add_mutually_exclusive_group() - test_meg.add_argument("--generate", action="store_true", default=False, help="(Test Generation) Generate golden files.") - test_meg.add_argument("--add-new-variables", action="store_true", default=False, help="(Test Generation) If new variables are found in D/ when running tests, add them to the golden files.") - test_meg.add_argument("--remove-old-tests", action="store_true", default=False, help="(Test Generation) Delete tests directories that are no longer.") - - # RUN - add_common_arguments(run) - run.add_argument("input", metavar="INPUT", type=str, help="Input file to run.") - run.add_argument("-e", "--engine", choices=["interactive", "batch"], type=str, default="interactive", help="Job execution/submission engine choice.") - run.add_argument("-p", "--partition", metavar="PARTITION", type=str, default="", help="(Batch) Partition for job submission.") - run.add_argument("-q", "--quality_of_service", metavar="QOS", type=str, default="", help="(Batch) Quality of Service for job submission.") - run.add_argument("-N", "--nodes", metavar="NODES", type=int, default=1, help="(Batch) Number of nodes.") - run.add_argument("-n", "--tasks-per-node", metavar="TASKS", type=int, default=1, help="Number of tasks per node.") - run.add_argument("-w", "--walltime", metavar="WALLTIME", type=str, default="01:00:00", help="(Batch) Walltime.") - run.add_argument("-a", "--account", metavar="ACCOUNT", type=str, default="", help="(Batch) Account to charge.") - run.add_argument("-@", "--email", metavar="EMAIL", type=str, default="", help="(Batch) Email for job notification.") - run.add_argument("-#", "--name", metavar="NAME", type=str, default="MFC", help="(Batch) Job name.") - run.add_argument("-s", "--scratch", action="store_true", default=False, help="Build from scratch.") - run.add_argument("-b", "--binary", choices=["mpirun", "jsrun", "srun", "mpiexec"], type=str, default=None, help="(Interactive) Override MPI execution binary") - run.add_argument( "--dry-run", action="store_true", default=False, help="(Batch) Run without submitting batch file.") - run.add_argument("--case-optimization", action="store_true", default=False, help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded.") - run.add_argument( "--no-build", action="store_true", default=False, help="(Testing) Do not rebuild MFC.") - 
run.add_argument("--wait", action="store_true", default=False, help="(Batch) Wait for the job to finish.") - run.add_argument("-c", "--computer", metavar="COMPUTER", type=str, default="default", help=f"(Batch) Path to a custom submission file template or one of {format_list_to_string(list(get_baked_templates().keys()))}.") - run.add_argument("-o", "--output-summary", metavar="OUTPUT", type=str, default=None, help="Output file (YAML) for summary.") - run.add_argument("--clean", action="store_true", default=False, help="Clean the case before running.") - run.add_argument("--ncu", nargs=argparse.REMAINDER, type=str, help="Profile with NVIDIA Nsight Compute.") - run.add_argument("--nsys", nargs=argparse.REMAINDER, type=str, help="Profile with NVIDIA Nsight Systems.") - run.add_argument("--rcu", nargs=argparse.REMAINDER, type=str, help="Profile with ROCM rocprof-compute.") - run.add_argument("--rsys", nargs=argparse.REMAINDER, type=str, help="Profile with ROCM rocprof-systems.") - - # BENCH - add_common_arguments(bench) - bench.add_argument("-o", "--output", metavar="OUTPUT", default=None, type=str, required="True", help="Path to the YAML output file to write the results to.") - bench.add_argument("-m", "--mem", metavar="MEM", default=1, type=int, help="Memory per task for benchmarking cases") - - # BENCH_DIFF - add_common_arguments(bench_diff, "t") - bench_diff.add_argument("lhs", metavar="LHS", type=str, help="Path to a benchmark result YAML file.") - bench_diff.add_argument("rhs", metavar="RHS", type=str, help="Path to a benchmark result YAML file.") - - # COUNT - add_common_arguments(count, "g") - - # COUNT - add_common_arguments(count_diff, "g") + """Parse command line arguments using the CLI schema.""" + # Handle enhanced help before argparse + help_command = _handle_enhanced_help(sys.argv) + + # Generate parser from schema + parser, subparser_map = generate_parser(MFC_CLI_SCHEMA, config) + + # If enhanced help was printed, also show argparse help and exit + if help_command and help_command in subparser_map: + subparser_map[help_command].print_help() + sys.exit(0) try: extra_index = sys.argv.index('--') except ValueError: extra_index = len(sys.argv) + # Patch subparser error methods to show full help on error + attempted_command = _get_command_from_args(sys.argv) + if attempted_command and attempted_command in subparser_map: + subparser = subparser_map[attempted_command] + + def custom_error(message): + # Show enhanced help + full argparse help (like -h would) + print_command_help(attempted_command, show_argparse=False) + subparser.print_help() + sys.stdout.flush() # Ensure help prints before error + sys.stderr.write(f'\n{subparser.prog}: error: {message}\n') + sys.exit(2) + + subparser.error = custom_error + args: dict = vars(parser.parse_args(sys.argv[1:extra_index])) args["--"] = sys.argv[extra_index + 1:] + # Handle --help at top level + if args.get("help") and args["command"] is None: + print_help() + sys.exit(0) + + # Handle 'help' command + if args["command"] == "help": + topic = args.get("topic") + if topic: + print_topic_help(topic) + else: + print_help_topics() + sys.exit(0) + + # Resolve command aliases + if args["command"] in COMMAND_ALIASES: + args["command"] = COMMAND_ALIASES[args["command"]] + # Add default arguments of other subparsers - for name, parser in [("run", run), ("test", test), ("build", build), - ("clean", clean), ("count", count), ("count_diff", count_diff)]: + # This ensures all argument keys exist even for commands that don't define them + # Only process 
subparsers that have common arguments we need
+    relevant_subparsers = ["run", "test", "build", "clean", "count", "count_diff", "validate"]
+    for name in relevant_subparsers:
         if args["command"] == name:
             continue
+        if name not in subparser_map:
+            continue
+
+        subparser = subparser_map[name]
+        # Parse with dummy input to get defaults (suppress errors for required positionals)
+        try:
+            # Commands with a required positional input need a dummy value
+            if name in ["run", "validate"]:
+                vals, _ = subparser.parse_known_args(["dummy_input.py"])
+            else:
+                vals, _ = subparser.parse_known_args([])
+        except SystemExit:
+            continue  # Skip if parsing fails

-        vals, _ = parser.parse_known_args(["-i", "None"])
         for key, val in vars(vals).items():
             if key == "input":
                 args[key] = args.get(key)
-            elif not key in args:
+            elif key not in args:
                 args[key] = args.get(key, val)

     if args["command"] is None:
-        parser.print_help()
-        exit(-1)
+        # Show welcome for first-time users, otherwise show enhanced help
+        if is_first_time_user():
+            print_welcome()
+        else:
+            print_help()
+        sys.exit(0)

-    # "Slugify" the name of the job
-    args["name"] = re.sub(r'[\W_]+', '-', args["name"])
+    # "Slugify" the name of the job (only for batch jobs, not for new command)
+    if args.get("name") is not None and isinstance(args["name"], str) and args["command"] != "new":
+        args["name"] = re.sub(r'[\W_]+', '-', args["name"])

     # We need to check for some invalid combinations of arguments because of
     # the limitations of argparse.
     if args["command"] == "build":
-        if (args["input"] is not None) ^ args["case_optimization"] :
+        if (args["input"] is not None) ^ args["case_optimization"]:
             raise MFCException("./mfc.sh build's --case-optimization and --input must be used together.")

     if args["command"] == "run":
         if args["binary"] is not None and args["engine"] != "interactive":
             raise MFCException("./mfc.sh run's --binary can only be used with --engine=interactive.")

+    # Resolve test case defaults (deferred to avoid slow startup for non-test commands)
+    if args["command"] == "test":
+        from .test.cases import list_cases  # pylint: disable=import-outside-toplevel
+        test_cases = list_cases()
+        if args.get("from") is None:
+            args["from"] = test_cases[0].get_uuid()
+        if args.get("to") is None:
+            args["to"] = test_cases[-1].get_uuid()
+
     # Input files to absolute paths
     for e in ["input", "input1", "input2"]:
         if e not in args:
diff --git a/toolchain/mfc/build.py b/toolchain/mfc/build.py
index 4fd18a2f10..0ee07dc5fa 100644
--- a/toolchain/mfc/build.py
+++ b/toolchain/mfc/build.py
@@ -1,12 +1,288 @@
-import os, typing, hashlib, dataclasses
+import os, typing, hashlib, dataclasses, subprocess, re, time, sys, threading, queue
+
+from rich.panel import Panel
+from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn, TaskProgressColumn

 from .case import Case
 from .printer import cons
 from .common import MFCException, system, delete_directory, create_directory, \
-    format_list_to_string
+    format_list_to_string, debug
 from .state import ARG, CFG
 from .run import input
 from .state import gpuConfigOptions
+from .user_guide import Tips
+
+
+# Regex to parse build progress
+# Ninja format: [42/156] Building Fortran object ...
+_NINJA_PROGRESS_RE = re.compile(r'^\[(\d+)/(\d+)\]\s+(.*)$')
+# Make format: [ 16%] Building Fortran object ... or [100%] Linking ...
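The two progress-line formats are easiest to see side by side. Below is a minimal, standalone sketch of how these patterns classify build output (the Make-format pattern is defined next); the sample lines are illustrative, not captured output:

```python
import re

# Re-declared locally so the sketch runs standalone; these mirror
# _NINJA_PROGRESS_RE and _MAKE_PROGRESS_RE from toolchain/mfc/build.py.
NINJA_RE = re.compile(r'^\[(\d+)/(\d+)\]\s+(.*)$')
MAKE_RE = re.compile(r'^\[\s*(\d+)%\]\s+(.*)$')

samples = [  # hypothetical ninja/make output lines
    "[42/156] Building Fortran object CMakeFiles/simulation.dir/m_rhs.f90.o",
    "[ 16%] Building Fortran object CMakeFiles/pre_process.dir/m_patches.f90.o",
    "[100%] Linking Fortran executable simulation",
]

for line in samples:
    if m := NINJA_RE.match(line):
        done, total, action = m.groups()
        print(f"ninja {done}/{total}: {action.split()[-1]}")
    elif m := MAKE_RE.match(line):
        percent, action = m.groups()
        print(f"make {percent:>3}%: {action.split()[-1]}")
# ninja 42/156: CMakeFiles/simulation.dir/m_rhs.f90.o
# make  16%: CMakeFiles/pre_process.dir/m_patches.f90.o
# make 100%: simulation
```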
+_MAKE_PROGRESS_RE = re.compile(r'^\[\s*(\d+)%\]\s+(.*)$') + + +# pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks +def _run_build_with_progress(command: typing.List[str], target_name: str, streaming: bool = False) -> subprocess.CompletedProcess: + """ + Run a build command with a progress bar that parses ninja output. + + Args: + command: The cmake build command to run + target_name: Name of the target being built + streaming: If True, print [X/Y] lines as they happen instead of progress bar (-v mode) + + Shows: + - Progress bar with file count (e.g., 42/156) + - Current file being compiled + - Elapsed time + + Falls back to spinner with elapsed time if ninja progress can't be parsed. + """ + cmd = [str(x) for x in command] + + # Check if we're in a TTY (interactive terminal) + is_tty = sys.stdout.isatty() + + # Collect all output for error reporting + all_stdout = [] + all_stderr = [] + + # Start the process (can't use 'with' since process is used in multiple branches) + process = subprocess.Popen( # pylint: disable=consider-using-with + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1 # Line buffered + ) + + if streaming: + # Streaming mode (-v): print build progress lines as they happen + cons.print(f" [bold blue]Building[/bold blue] [magenta]{target_name}[/magenta] [dim](-v)[/dim]...") + start_time = time.time() + + # Read stdout and print matching lines + for line in iter(process.stdout.readline, ''): + all_stdout.append(line) + stripped = line.strip() + + # Try ninja format first: [42/156] Action + ninja_match = _NINJA_PROGRESS_RE.match(stripped) + if ninja_match: + completed = ninja_match.group(1) + total = ninja_match.group(2) + action = ninja_match.group(3) + # Extract filename from action + parts = action.split() + if len(parts) >= 3: + filename = os.path.basename(parts[-1]).replace('.o', '').replace('.obj', '') + if len(filename) > 40: + filename = filename[:37] + "..." + cons.print(f" [dim][{completed}/{total}][/dim] {filename}") + continue + + # Try make format: [ 16%] Action + make_match = _MAKE_PROGRESS_RE.match(stripped) + if make_match: + percent = make_match.group(1) + action = make_match.group(2) + # Extract filename from action + parts = action.split() + if len(parts) >= 3: + # Get the last part which is usually the file path + obj_path = parts[-1] + filename = os.path.basename(obj_path).replace('.o', '').replace('.obj', '') + if len(filename) > 40: + filename = filename[:37] + "..." 
+ cons.print(f" [dim][{percent:>3}%][/dim] {filename}") + + # Read any remaining stderr + stderr = process.stderr.read() + all_stderr.append(stderr) + process.wait() + + elapsed = time.time() - start_time + if elapsed > 5: + cons.print(f" [dim](build took {elapsed:.1f}s)[/dim]") + + return subprocess.CompletedProcess(cmd, process.returncode, ''.join(all_stdout), ''.join(all_stderr)) + + if not is_tty: + # Non-interactive, non-streaming: show message with elapsed time + cons.print(f" [bold blue]Building[/bold blue] [magenta]{target_name}[/magenta]...") + start_time = time.time() + stdout, stderr = process.communicate() + elapsed = time.time() - start_time + if elapsed > 5: # Only show time for longer builds + cons.print(f" [dim](build took {elapsed:.1f}s)[/dim]") + return subprocess.CompletedProcess(cmd, process.returncode, stdout, stderr) + + # Interactive: show progress bar + current_file = "" + total_files = 0 + completed_files = 0 + progress_detected = False + + # Create a custom progress display + with Progress( + SpinnerColumn(), + TextColumn("[bold blue]Building[/bold blue] [magenta]{task.fields[target]}[/magenta]"), + BarColumn(bar_width=30), + TaskProgressColumn(), + TextColumn("•"), + TimeElapsedColumn(), + TextColumn("[dim]{task.fields[current_file]}[/dim]"), + console=cons.raw, + transient=True, # Remove progress bar when done + refresh_per_second=4, + ) as progress: + # Start with indeterminate progress (total=None shows spinner behavior) + task = progress.add_task( + "build", + total=None, + target=target_name, + current_file="" + ) + + # Use threads to read stdout and stderr concurrently + stdout_queue = queue.Queue() + stderr_queue = queue.Queue() + + def read_stdout(): + for line in iter(process.stdout.readline, ''): + stdout_queue.put(line) + stdout_queue.put(None) # Signal EOF + + def read_stderr(): + for line in iter(process.stderr.readline, ''): + stderr_queue.put(line) + stderr_queue.put(None) # Signal EOF + + stdout_thread = threading.Thread(target=read_stdout, daemon=True) + stderr_thread = threading.Thread(target=read_stderr, daemon=True) + stdout_thread.start() + stderr_thread.start() + + stdout_done = False + stderr_done = False + + while not (stdout_done and stderr_done): + # Check stdout + try: + line = stdout_queue.get_nowait() + if line is None: + stdout_done = True + else: + all_stdout.append(line) + stripped = line.strip() + + # Try ninja format first: [42/156] Action + ninja_match = _NINJA_PROGRESS_RE.match(stripped) + if ninja_match: + completed_files = int(ninja_match.group(1)) + total_files = int(ninja_match.group(2)) + action = ninja_match.group(3) + + # Extract just the filename from the action + if action: + parts = action.split() + if len(parts) >= 3: + obj_path = parts[-1] + current_file = os.path.basename(obj_path).replace('.o', '').replace('.obj', '') + if len(current_file) > 30: + current_file = current_file[:27] + "..." 
+ + if not progress_detected: + progress_detected = True + progress.update(task, total=total_files) + + progress.update( + task, + completed=completed_files, + current_file=current_file + ) + else: + # Try make format: [ 16%] Action + make_match = _MAKE_PROGRESS_RE.match(stripped) + if make_match: + percent = int(make_match.group(1)) + action = make_match.group(2) + + # Extract filename from action + if action: + parts = action.split() + if len(parts) >= 3: + obj_path = parts[-1] + current_file = os.path.basename(obj_path).replace('.o', '').replace('.obj', '') + if len(current_file) > 30: + current_file = current_file[:27] + "..." + + if not progress_detected: + progress_detected = True + # Make uses percentage, so set total to 100 + progress.update(task, total=100) + + progress.update( + task, + completed=percent, + current_file=current_file + ) + except queue.Empty: + pass + + # Check stderr + try: + line = stderr_queue.get_nowait() + if line is None: + stderr_done = True + else: + all_stderr.append(line) + except queue.Empty: + pass + + # Small sleep to avoid busy waiting + if not stdout_done or not stderr_done: + time.sleep(0.01) + + # Wait for process to complete + process.wait() + + # Ensure threads are done + stdout_thread.join(timeout=1) + stderr_thread.join(timeout=1) + + return subprocess.CompletedProcess( + cmd, + process.returncode, + ''.join(all_stdout), + ''.join(all_stderr) + ) + + +def _show_build_error(result: subprocess.CompletedProcess, stage: str): + """Display build error details from captured subprocess output.""" + cons.print() + cons.print(f"[bold red]{stage} Failed - Error Details:[/bold red]") + + # Show stdout if available (often contains the actual error for CMake) + if result.stdout: + stdout_text = result.stdout if isinstance(result.stdout, str) else result.stdout.decode('utf-8', errors='replace') + stdout_lines = stdout_text.strip().split('\n') + # Show last 40 lines to capture the relevant error + if len(stdout_lines) > 40: + stdout_lines = ['... (truncated) ...'] + stdout_lines[-40:] + if stdout_lines and stdout_lines != ['']: + cons.raw.print(Panel('\n'.join(stdout_lines), title="Output", border_style="yellow")) + + # Show stderr if available + if result.stderr: + stderr_text = result.stderr if isinstance(result.stderr, str) else result.stderr.decode('utf-8', errors='replace') + stderr_lines = stderr_text.strip().split('\n') + if len(stderr_lines) > 40: + stderr_lines = ['... 
(truncated) ...'] + stderr_lines[-40:]
+        if stderr_lines and stderr_lines != ['']:
+            cons.raw.print(Panel('\n'.join(stderr_lines), title="Errors", border_style="red"))
+
+    cons.print()

 @dataclasses.dataclass
 class MFCTarget:
@@ -141,7 +417,8 @@ def configure(self, case: Case):
             f"-DMFC_MIXED_PRECISION={'ON' if ARG('mixed') else 'OFF'}"
         ]

-        if ARG("verbose"):
+        # Verbosity level 3 (-vvv): add cmake debug flags
+        if ARG("verbose") >= 3:
             flags.append('--debug-find')

         if not self.isDependency:
@@ -161,9 +438,31 @@
             case.generate_fpp(self)

-        if system(command).returncode != 0:
+        debug(f"Configuring {self.name} in {build_dirpath}")
+        debug(f"CMake flags: {' '.join(flags)}")
+
+        verbosity = ARG('verbose')
+        if verbosity >= 2:
+            # -vv or higher: echo the cmake command and show raw output
+            level_str = "vv" + "v" * (verbosity - 2) if verbosity > 2 else "vv"
+            cons.print(f"  [bold blue]Configuring[/bold blue] [magenta]{self.name}[/magenta] [dim](-{level_str})[/dim]...")
+            cons.print(f"  [dim]$ {' '.join(str(c) for c in command)}[/dim]")
+            cons.print()
+            result = system(command, print_cmd=False)
+        else:
+            # Normal mode: capture output, show on error
+            cons.print(f"  [bold blue]Configuring[/bold blue] [magenta]{self.name}[/magenta]...")
+            result = system(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, print_cmd=False)
+
+        if result.returncode != 0:
+            cons.print(f"  [bold red]✗[/bold red] Configuration failed for [magenta]{self.name}[/magenta]")
+            if verbosity < 2:
+                _show_build_error(result, "Configuration")
+            Tips.after_build_failure()
             raise MFCException(f"Failed to configure the [bold magenta]{self.name}[/bold magenta] target.")
+        cons.print(f"  [bold green]✓[/bold green] Configured [magenta]{self.name}[/magenta]")
         cons.print(no_indent=True)

     def build(self, case: input.MFCInputFile):
@@ -173,20 +472,53 @@
             "--target", self.name,
             "--parallel", ARG("jobs"),
             "--config", 'Debug' if ARG('debug') else 'Release']
-        if ARG('verbose'):
+
+        verbosity = ARG('verbose')
+        # -vv or higher: add cmake --verbose flag for full compiler commands
+        if verbosity >= 2:
             command.append("--verbose")

-        if system(command).returncode != 0:
+        debug(f"Building {self.name} with {ARG('jobs')} parallel jobs")
+        debug(f"Build command: {' '.join(str(c) for c in command)}")
+
+        if verbosity >= 2:
+            # -vv or higher: show raw compiler output (full verbose)
+            level_str = "vv" + "v" * (verbosity - 2) if verbosity > 2 else "vv"
+            cons.print(f"  [bold blue]Building[/bold blue] [magenta]{self.name}[/magenta] [dim](-{level_str})[/dim]...")
+            cons.print(f"  [dim]$ {' '.join(str(c) for c in command)}[/dim]")
+            cons.print()
+            result = system(command, print_cmd=False)
+        elif verbosity == 1:
+            # -v: show ninja [X/Y] lines as they compile (streaming, no progress bar)
+            result = _run_build_with_progress(command, self.name, streaming=True)
+        else:
+            # Default: show progress bar
+            result = _run_build_with_progress(command, self.name, streaming=False)
+
+        if result.returncode != 0:
+            cons.print(f"  [bold red]✗[/bold red] Build failed for [magenta]{self.name}[/magenta]")
+            if verbosity < 2:
+                _show_build_error(result, "Build")
+            Tips.after_build_failure()
             raise MFCException(f"Failed to build the [bold magenta]{self.name}[/bold magenta] target.")
+        cons.print(f"  [bold green]✓[/bold green] Built [magenta]{self.name}[/magenta]")
         cons.print(no_indent=True)
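The same three-way verbosity dispatch appears in both `configure()` and `build()` above. A minimal sketch of how a count-style `-v` flag maps onto the output modes; the argparse wiring here is illustrative, while the thresholds mirror the code above:

```python
import argparse

parser = argparse.ArgumentParser(prog="./mfc.sh build", add_help=False)
# A count action turns repeated -v flags into an integer verbosity level.
parser.add_argument("-v", "--verbose", action="count", default=0)

def output_mode(verbosity: int) -> str:
    if verbosity >= 2:     # -vv/-vvv: raw cmake/compiler output
        return "raw"
    if verbosity == 1:     # -v: streamed ninja/make progress lines
        return "streaming"
    return "progress-bar"  # default: transient rich progress bar

for argv in ([], ["-v"], ["-vv"], ["-vvv"]):
    print(argv, "->", output_mode(parser.parse_args(argv).verbose))
# []       -> progress-bar
# ['-v']   -> streaming
# ['-vv']  -> raw
# ['-vvv'] -> raw (configure() additionally passes --debug-find at >= 3)
```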
     def install(self, case: input.MFCInputFile):
         command = ["cmake", "--install", self.get_staging_dirpath(case)]

-        if system(command).returncode != 0:
+        # Show progress indicator during install
+        cons.print(f"  [bold blue]Installing[/bold blue] [magenta]{self.name}[/magenta]...")
+
+        # Capture output to show detailed errors on failure
+        result = system(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, print_cmd=False)
+        if result.returncode != 0:
+            cons.print(f"  [bold red]✗[/bold red] Install failed for [magenta]{self.name}[/magenta]")
+            _show_build_error(result, "Install")
             raise MFCException(f"Failed to install the [bold magenta]{self.name}[/bold magenta] target.")
+        cons.print(f"  [bold green]✓[/bold green] Installed [magenta]{self.name}[/magenta]")
         cons.print(no_indent=True)

 # name           flags    isDep isDef isReq dependencies   run order
diff --git a/toolchain/mfc/case.py b/toolchain/mfc/case.py
index 774ab08c9a..2d58c0dc7d 100644
--- a/toolchain/mfc/case.py
+++ b/toolchain/mfc/case.py
@@ -1,12 +1,17 @@
-import re, json, math, copy, dataclasses, fastjsonschema
+# pylint: disable=import-outside-toplevel
+import re, json, math, copy, dataclasses, difflib, fastjsonschema

 from . import common
-from . import build
 from .printer import cons
 from .state import ARG
 from .run import case_dicts

+
+def _suggest_similar_params(unknown_key: str, valid_keys: list, n: int = 3) -> list:
+    """Find similar parameter names for typo suggestions."""
+    return difflib.get_close_matches(unknown_key, valid_keys, n=n, cutoff=0.6)
+
 QPVF_IDX_VARS = {
     'alpha_rho': 'contxb', 'vel' : 'momxb', 'pres': 'E_idx',
     'alpha': 'advxb', 'tau_e': 'stress_idx%beg', 'Y': 'chemxb',
@@ -38,6 +43,7 @@ def gen_json_dict_str(self) -> str:
         return json.dumps(self.params, indent=4)

     def get_inp(self, _target) -> str:
+        from . import build
         target = build.get_target(_target)

         cons.print(f"Generating [magenta]{target.name}.inp[/magenta]:")
@@ -64,7 +70,9 @@
                 ignored.append(key)

             if key not in case_dicts.ALL:
-                raise common.MFCException(f"MFCInputFile::dump: Case parameter '{key}' is not used by any MFC code. Please check your spelling or add it as a new parameter.")
+                suggestions = _suggest_similar_params(key, list(case_dicts.ALL.keys()))
+                hint = f" Did you mean: {', '.join(suggestions)}?" if suggestions else ""
+                raise common.MFCException(f"Unknown parameter '{key}'.{hint}")

         cons.print(f"[yellow]INFO:[/yellow] Forwarded {len(self.params)-len(ignored)}/{len(self.params)} parameters.")
         cons.unindent()
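The `difflib` call behind `_suggest_similar_params` can be exercised on its own. A minimal sketch, assuming a small illustrative subset of valid parameter names:

```python
import difflib

VALID = ["num_fluids", "num_patches", "weno_order", "riemann_solver", "time_stepper"]

def suggest(unknown: str, valid: list, n: int = 3) -> list:
    # Same call as _suggest_similar_params: top-n matches above a 0.6 similarity cutoff.
    return difflib.get_close_matches(unknown, valid, n=n, cutoff=0.6)

print(suggest("wenoorder", VALID))  # ['weno_order']
print(suggest("num_fluid", VALID))  # ['num_fluids']
print(suggest("xyzzy", VALID))      # [] -> no "Did you mean" hint is appended
```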
@@ -72,17 +80,37 @@
         return f"&user_inputs\n{dict_str}&end/\n"

     def validate_params(self, origin_txt: str = None):
-        '''Typechecks parameters read from case file. If a parameter
-        is assigned a vlaie of the wrong type, this method throws an exception
-        highlighting the violating parameter and specifying what it expects.'''
+        '''Validates parameters read from case file:
+        1. Type checking via JSON schema
+        2. Constraint validation (valid values, ranges)
+        3. Dependency checking (required/recommended params)
+        '''
+        # Type checking
         try:
             case_dicts.get_validator()(self.params)
         except fastjsonschema.JsonSchemaException as e:
             if origin_txt:
                 raise common.MFCException(f"{origin_txt}: {e}")
             raise common.MFCException(f"{e}")

+        # Constraint and dependency validation
+        from .params.validate import validate_case
+
+        errors, warnings = validate_case(self.params)
+
+        # Show warnings (non-fatal)
+        if warnings:
+            cons.print()
+            for w in warnings:
+                cons.print(f"[yellow]Warning:[/yellow] {w}")
+
+        # Raise errors (fatal)
+        if errors:
+            error_msg = "\n".join(f"  - {e}" for e in errors)
+            if origin_txt:
+                raise common.MFCException(f"{origin_txt}:\n{error_msg}")
+            raise common.MFCException(f"Validation errors:\n{error_msg}")
+
     def __get_ndims(self) -> int:
         return 1 + min(int(self.params.get("n", 0)), 1) + min(int(self.params.get("p", 0)), 1)
@@ -358,6 +386,8 @@ def __get_pre_fpp(self, print: bool) -> str:
         return out

     def get_fpp(self, target, print = True) -> str:
+        from . import build
+
         def _prepend() -> str:
             return f"""\
 #:set chemistry = {self.params.get("chemistry", 'F') == 'T'}
diff --git a/toolchain/mfc/case_validator.py b/toolchain/mfc/case_validator.py
index ee501cb5ac..fad1243111 100644
--- a/toolchain/mfc/case_validator.py
+++ b/toolchain/mfc/case_validator.py
@@ -14,10 +14,31 @@
 # pylint: disable=too-many-lines
 # Justification: Comprehensive validator covering all MFC parameter constraints

-from typing import Dict, Any, List
+from typing import Dict, Any, List, Set
+from functools import lru_cache

 from .common import MFCException

+
+@lru_cache(maxsize=1)
+def _get_logical_params_from_registry() -> Set[str]:
+    """
+    Get all LOG-type parameter names from the registry.
+
+    This replaces the hardcoded logical_params list with a dynamic lookup,
+    ensuring all LOG parameters are validated without manual maintenance.
+
+    Returns:
+        Set of parameter names that have LOG type.
+    """
+    from .params import REGISTRY  # pylint: disable=import-outside-toplevel
+    from .params.schema import ParamType  # pylint: disable=import-outside-toplevel
+
+    return {
+        name for name, param in REGISTRY.all_params.items()
+        if param.param_type == ParamType.LOG
+    }
+
+
 class CaseConstraintError(MFCException):
     """Exception raised when case parameters violate constraints"""

@@ -42,6 +63,37 @@ def prohibit(self, condition: bool, message: str):
         if condition:
             self.errors.append(message)

+    def _validate_logical(self, key: str):
+        """Validate that a parameter is a valid Fortran logical ('T' or 'F')."""
+        val = self.get(key)
+        if val is not None and val not in ('T', 'F'):
+            self.errors.append(
+                f"{key} must be 'T' or 'F', got '{val}'"
+            )
+
+    def check_parameter_types(self):
+        """Validate parameter types before other checks.
+
+        This catches invalid values early with clear error messages,
+        rather than letting them cause confusing failures later.
+
+        LOG parameters are discovered dynamically from the registry,
+        eliminating the need to maintain a hardcoded list.
+ """ + # Validate all LOG-type parameters from registry + logical_params = _get_logical_params_from_registry() + for param in logical_params: + if param in self.params: # Only validate params that are set + self._validate_logical(param) + + # Required domain parameters when m > 0 + m = self.get('m') + if m is not None and m > 0: + self.prohibit(not self.is_set('x_domain%beg'), + "x_domain%beg must be set when m > 0") + self.prohibit(not self.is_set('x_domain%end'), + "x_domain%end must be set when m > 0") + # =================================================================== # Common Checks (All Stages) # =================================================================== @@ -232,7 +284,7 @@ def check_boundary_conditions(self): # pylint: disable=too-many-locals self.prohibit(bc_z_end is not None and bc_z_end not in [-1, -2], "bc_z%end must be -1 (periodic) or -2 (reflective) for 3D cylindrical coordinates") - def check_bubbles_euler(self): + def check_bubbles_euler(self): # pylint: disable=too-many-locals """Checks constraints on bubble parameters""" bubbles_euler = self.get('bubbles_euler', 'F') == 'T' @@ -262,13 +314,54 @@ def check_bubbles_euler(self): "Bubble models untested with pi-gamma model (model_eqns = 1)") self.prohibit(model_eqns == 4 and rhoref is None, "rhoref must be set if using bubbles_euler with model_eqns = 4") + self.prohibit(rhoref is not None and rhoref <= 0, + "rhoref (reference density) must be positive") self.prohibit(model_eqns == 4 and pref is None, "pref must be set if using bubbles_euler with model_eqns = 4") + self.prohibit(pref is not None and pref <= 0, + "pref (reference pressure) must be positive") self.prohibit(model_eqns == 4 and num_fluids != 1, "4-equation model (model_eqns = 4) is single-component and requires num_fluids = 1") self.prohibit(cyl_coord, "Bubble models untested in cylindrical coordinates") + # === BUBBLE PHYSICS PARAMETERS === + # Validate bubble reference parameters (bub_pp%) + R0ref = self.get('bub_pp%R0ref') + p0ref = self.get('bub_pp%p0ref') + rho0ref = self.get('bub_pp%rho0ref') + T0ref = self.get('bub_pp%T0ref') + + if R0ref is not None: + self.prohibit(R0ref <= 0, + "bub_pp%R0ref (reference bubble radius) must be positive") + if p0ref is not None: + self.prohibit(p0ref <= 0, + "bub_pp%p0ref (reference pressure) must be positive") + if rho0ref is not None: + self.prohibit(rho0ref <= 0, + "bub_pp%rho0ref (reference density) must be positive") + if T0ref is not None: + self.prohibit(T0ref <= 0, + "bub_pp%T0ref (reference temperature) must be positive") + + # Viscosities must be non-negative + mu_l = self.get('bub_pp%mu_l') + mu_g = self.get('bub_pp%mu_g') + mu_v = self.get('bub_pp%mu_v') + + if mu_l is not None: + self.prohibit(mu_l < 0, "bub_pp%mu_l (liquid viscosity) must be non-negative") + if mu_g is not None: + self.prohibit(mu_g < 0, "bub_pp%mu_g (gas viscosity) must be non-negative") + if mu_v is not None: + self.prohibit(mu_v < 0, "bub_pp%mu_v (vapor viscosity) must be non-negative") + + # Surface tension must be non-negative + ss = self.get('bub_pp%ss') + if ss is not None: + self.prohibit(ss < 0, "bub_pp%ss (surface tension) must be non-negative") + def check_qbmm_and_polydisperse(self): """Checks constraints on QBMM and polydisperse bubble parameters""" polydisperse = self.get('polydisperse', 'F') == 'T' @@ -478,20 +571,31 @@ def check_riemann_solver(self): def check_time_stepping(self): """Checks time stepping parameters (simulation/post-process)""" cfl_dt = self.get('cfl_dt', 'F') == 'T' + cfl_adap_dt = 
self.get('cfl_adap_dt', 'F') == 'T' + adap_dt = self.get('adap_dt', 'F') == 'T' time_stepper = self.get('time_stepper') # Check time_stepper bounds self.prohibit(time_stepper is not None and (time_stepper < 1 or time_stepper > 3), "time_stepper must be 1, 2, or 3") - if cfl_dt: + # CFL-based variable dt modes (use t_stop/t_save for termination) + # Note: adap_dt is NOT included here - it uses t_step_* for termination + variable_dt = cfl_dt or cfl_adap_dt + + # dt validation (applies to all modes if dt is set) + dt = self.get('dt') + self.prohibit(dt is not None and dt <= 0, + "dt must be positive") + + if variable_dt: cfl_target = self.get('cfl_target') t_stop = self.get('t_stop') t_save = self.get('t_save') n_start = self.get('n_start') - self.prohibit(cfl_target is not None and (cfl_target < 0 or cfl_target > 1), - "cfl_target must be between 0 and 1") + self.prohibit(cfl_target is not None and (cfl_target <= 0 or cfl_target > 1), + "cfl_target must be in (0, 1]") self.prohibit(t_stop is not None and t_stop <= 0, "t_stop must be positive") self.prohibit(t_save is not None and t_save <= 0, @@ -500,21 +604,29 @@ def check_time_stepping(self): "t_save must be <= t_stop") self.prohibit(n_start is not None and n_start < 0, "n_start must be non-negative") - else: - t_step_start = self.get('t_step_start') - t_step_stop = self.get('t_step_stop') - t_step_save = self.get('t_step_save') - dt = self.get('dt') - - self.prohibit(t_step_start is not None and t_step_start < 0, - "t_step_start must be non-negative") - self.prohibit(t_step_stop is not None and t_step_start is not None and t_step_stop <= t_step_start, - "t_step_stop must be > t_step_start") - self.prohibit(t_step_save is not None and t_step_stop is not None and t_step_start is not None and - t_step_save > t_step_stop - t_step_start, - "t_step_save must be <= (t_step_stop - t_step_start)") - self.prohibit(dt is not None and dt <= 0, - "dt must be positive") + # t_step_* validation (applies to fixed and adap_dt modes) + t_step_start = self.get('t_step_start') + t_step_stop = self.get('t_step_stop') + t_step_save = self.get('t_step_save') + + self.prohibit(t_step_start is not None and t_step_start < 0, + "t_step_start must be non-negative") + self.prohibit(t_step_stop is not None and t_step_stop < 0, + "t_step_stop must be non-negative") + self.prohibit(t_step_stop is not None and t_step_start is not None and t_step_stop <= t_step_start, + "t_step_stop must be > t_step_start") + self.prohibit(t_step_save is not None and t_step_save <= 0, + "t_step_save must be positive") + self.prohibit(t_step_save is not None and t_step_stop is not None and t_step_start is not None and + t_step_save > t_step_stop - t_step_start, + "t_step_save must be <= (t_step_stop - t_step_start)") + + if not variable_dt: + # dt is required in pure fixed dt mode (not cfl_dt, not cfl_adap_dt) + # adap_dt mode uses dt as initial value, so it's optional + uses_fixed_stepping = self.is_set('t_step_start') or self.is_set('t_step_stop') + self.prohibit(uses_fixed_stepping and not adap_dt and not self.is_set('dt'), + "dt must be set when using fixed time stepping (t_step_start/t_step_stop)") def check_finite_difference(self): """Checks constraints on finite difference parameters""" @@ -866,6 +978,11 @@ def check_acoustic_source(self): # pylint: disable=too-many-locals,too-many-bra wave_set = wavelength is not None self.prohibit(freq_set == wave_set, f"One and only one of acoustic({jstr})%frequency or wavelength must be specified for pulse = {pulse}") + # Physics: frequency and 
wavelength must be positive + self.prohibit(frequency is not None and frequency <= 0, + f"acoustic({jstr})%frequency must be positive") + self.prohibit(wavelength is not None and wavelength <= 0, + f"acoustic({jstr})%wavelength must be positive") if pulse == 2: time_set = gauss_sigma_time is not None @@ -874,6 +991,11 @@ def check_acoustic_source(self): # pylint: disable=too-many-locals,too-many-bra f"One and only one of acoustic({jstr})%gauss_sigma_time or gauss_sigma_dist must be specified for pulse = 2") self.prohibit(delay is None, f"acoustic({jstr})%delay must be specified for pulse = 2 (Gaussian)") + # Physics: gaussian parameters must be positive + self.prohibit(gauss_sigma_time is not None and gauss_sigma_time <= 0, + f"acoustic({jstr})%gauss_sigma_time must be positive") + self.prohibit(gauss_sigma_dist is not None and gauss_sigma_dist <= 0, + f"acoustic({jstr})%gauss_sigma_dist must be positive") if pulse == 4: self.prohibit(bb_num_freq is None, @@ -1252,6 +1374,109 @@ def check_misc_pre_process(self): self.prohibit(elliptic_smoothing_iters < 1, "elliptic_smoothing_iters must be positive") + def _is_numeric(self, value) -> bool: + """Check if value is numeric (not a string expression).""" + return isinstance(value, (int, float)) and not isinstance(value, bool) + + def check_patch_physics(self): # pylint: disable=too-many-locals,too-many-branches + """Checks physics constraints on patch initial conditions (pre-process). + + Validates that initial conditions are physically meaningful: + - Pressure must be positive (thermodynamic requirement) + - Density (alpha_rho) must be positive + - Volume fractions must sum appropriately and be in [0, 1] + - Geometric dimensions must be positive + + Note: String values (analytical expressions like "0.5*sin(x)") are + evaluated at runtime by Fortran and cannot be validated here. 
+ """ + num_patches = self.get('num_patches', 0) + num_fluids = self.get('num_fluids', 1) + bubbles_euler = self.get('bubbles_euler', 'F') == 'T' + num_ibs = self.get('num_ibs', 0) or 0 # IBM (Immersed Boundary Method) + + if num_patches is None or num_patches <= 0: + return + + for i in range(1, num_patches + 1): + istr = str(i) + geometry = self.get(f'patch_icpp({i})%geometry') + + # Skip if patch not defined + if geometry is None: + continue + + # Skip thermodynamic validation for special patches: + # - alter_patch patches (modifications to other patches) + # - hcid patches (hard-coded initial conditions computed at runtime) + hcid = self.get(f'patch_icpp({i})%hcid') + alter_patches = [self.get(f'patch_icpp({i})%alter_patch({j})') == 'T' + for j in range(1, num_patches + 1)] + is_special = hcid is not None or any(alter_patches) + + # === THERMODYNAMICS === + # Pressure must be positive for physical stability + # (skip for special patches where values are computed differently) + if not is_special: + pres = self.get(f'patch_icpp({i})%pres') + if pres is not None and self._is_numeric(pres): + self.prohibit(pres <= 0, + f"patch_icpp({istr})%pres must be positive (got {pres})") + + # === FLUID PROPERTIES === + # (skip for special patches where values are computed differently) + if not is_special: + for j in range(1, num_fluids + 1): + jstr = str(j) + + # Volume fraction must be in [0, 1] (or non-negative for IBM cases) + alpha = self.get(f'patch_icpp({i})%alpha({j})') + if alpha is not None and self._is_numeric(alpha): + self.prohibit(alpha < 0, + f"patch_icpp({istr})%alpha({jstr}) must be non-negative (got {alpha})") + # For non-IBM cases, alpha should be in [0, 1] + if num_ibs == 0: + self.prohibit(alpha > 1, + f"patch_icpp({istr})%alpha({jstr}) must be <= 1 (got {alpha})") + + # Density (alpha_rho) must be non-negative + # Note: alpha_rho = 0 is allowed for vacuum regions and numerical convenience + alpha_rho = self.get(f'patch_icpp({i})%alpha_rho({j})') + if alpha_rho is not None and self._is_numeric(alpha_rho): + self.prohibit(alpha_rho < 0, + f"patch_icpp({istr})%alpha_rho({jstr}) must be non-negative (got {alpha_rho})") + + # === GEOMETRY === + # Patch dimensions must be positive (except in cylindrical coords where + # length_y/length_z can be sentinel values like -1000000.0) + length_x = self.get(f'patch_icpp({i})%length_x') + length_y = self.get(f'patch_icpp({i})%length_y') + length_z = self.get(f'patch_icpp({i})%length_z') + radius = self.get(f'patch_icpp({i})%radius') + cyl_coord = self.get('cyl_coord', 'F') == 'T' + + if length_x is not None and self._is_numeric(length_x): + self.prohibit(length_x <= 0, + f"patch_icpp({istr})%length_x must be positive (got {length_x})") + # In cylindrical coordinates, length_y and length_z can be negative sentinel values + if length_y is not None and self._is_numeric(length_y) and not cyl_coord: + self.prohibit(length_y <= 0, + f"patch_icpp({istr})%length_y must be positive (got {length_y})") + if length_z is not None and self._is_numeric(length_z) and not cyl_coord: + self.prohibit(length_z <= 0, + f"patch_icpp({istr})%length_z must be positive (got {length_z})") + if radius is not None and self._is_numeric(radius): + self.prohibit(radius <= 0, + f"patch_icpp({istr})%radius must be positive (got {radius})") + + # === BUBBLES === + # Bubble radius must be positive + if bubbles_euler: + r0 = self.get(f'patch_icpp({i})%r0') + if r0 is not None and self._is_numeric(r0): + self.prohibit(r0 <= 0, + f"patch_icpp({istr})%r0 must be positive (got {r0})") 
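The `prohibit()` pattern used throughout these checks accumulates error messages instead of raising on the first violation, so a single run reports every problem. A toy sketch on a deliberately invalid params dict; `MiniValidator` is a stand-in for illustration, not the toolchain class:

```python
from typing import Any, Dict

class MiniValidator:
    """Toy version of the validator's accumulate-then-report pattern."""
    def __init__(self, params: Dict[str, Any]):
        self.params, self.errors = params, []

    def get(self, key, default=None):
        return self.params.get(key, default)

    def prohibit(self, condition: bool, message: str):
        # Record the violation; reporting happens once all checks have run.
        if condition:
            self.errors.append(message)

v = MiniValidator({"patch_icpp(1)%pres": -1.0e5, "patch_icpp(1)%alpha(1)": 1.2})

pres = v.get("patch_icpp(1)%pres")
v.prohibit(pres is not None and pres <= 0,
           f"patch_icpp(1)%pres must be positive (got {pres})")

alpha = v.get("patch_icpp(1)%alpha(1)")
v.prohibit(alpha is not None and alpha > 1,
           f"patch_icpp(1)%alpha(1) must be <= 1 (got {alpha})")

print("\n".join(v.errors))
# patch_icpp(1)%pres must be positive (got -100000.0)
# patch_icpp(1)%alpha(1) must be <= 1 (got 1.2)
```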
+ def check_bc_patches(self): # pylint: disable=too-many-branches,too-many-statements """Checks boundary condition patch geometry (pre-process)""" num_bc_patches = self.get('num_bc_patches', 0) @@ -1575,6 +1800,7 @@ def check_no_flow_variables(self): # pylint: disable=too-many-locals def validate_common(self): """Validate parameters common to all stages""" + self.check_parameter_types() # Type validation first self.check_simulation_domain() self.check_model_eqns_and_num_fluids() self.check_igr() @@ -1624,6 +1850,7 @@ def validate_pre_process(self): self.check_perturb_density() self.check_chemistry() self.check_misc_pre_process() + self.check_patch_physics() self.check_bc_patches() def validate_post_process(self): @@ -1670,9 +1897,34 @@ def validate(self, stage: str = 'simulation'): return if self.errors: - error_msg = "Case parameter constraint violations:\n" + "\n".join(f" • {err}" for err in self.errors) + error_msg = self._format_errors() raise CaseConstraintError(error_msg) + def _format_errors(self) -> str: + """Format errors with enhanced context and suggestions.""" + lines = ["[bold red]Case parameter constraint violations:[/bold red]\n"] + + for i, err in enumerate(self.errors, 1): + lines.append(f" [bold]{i}.[/bold] {err}") + + # Add helpful hints for common errors + err_lower = err.lower() + if "must be positive" in err_lower or "must be set" in err_lower: + lines.append(" [dim]Check that this required parameter is defined in your case file[/dim]") + elif "weno_order" in err_lower: + lines.append(" [dim]Valid values: 1, 3, 5, or 7[/dim]") + elif "riemann_solver" in err_lower: + lines.append(" [dim]Valid values: 1 (HLL), 2 (HLLC), 3 (Exact), etc.[/dim]") + elif "model_eqns" in err_lower: + lines.append(" [dim]Valid values: 1, 2 (5-eq), 3 (6-eq), or 4[/dim]") + elif "boundary" in err_lower or "bc_" in err_lower: + lines.append(" [dim]Common BC values: -1 (periodic), -2 (reflective), -3 (extrapolation)[/dim]") + + lines.append("") + lines.append("[dim]Tip: Run './mfc.sh validate case.py' for detailed validation[/dim]") + + return "\n".join(lines) + def validate_case_constraints(params: Dict[str, Any], stage: str = 'simulation'): """Convenience function to validate case parameters diff --git a/toolchain/mfc/clean.py b/toolchain/mfc/clean.py new file mode 100644 index 0000000000..3d9c2ff096 --- /dev/null +++ b/toolchain/mfc/clean.py @@ -0,0 +1,22 @@ +""" +MFC Clean Command - Remove build artifacts. +""" + +import os +import shutil + +from .printer import cons +from .common import MFC_BUILD_DIR + + +def clean(): + """Remove the build directory and all build artifacts.""" + if os.path.isdir(MFC_BUILD_DIR): + cons.print(f"Removing [bold magenta]{MFC_BUILD_DIR}[/bold magenta]...") + try: + shutil.rmtree(MFC_BUILD_DIR) + cons.print("[bold green]Build directory cleaned successfully.[/bold green]") + except OSError as e: + cons.print(f"[bold red]Error cleaning build directory:[/bold red] {e}") + else: + cons.print("[yellow]Build directory does not exist, nothing to clean.[/yellow]") diff --git a/toolchain/mfc/cli/__init__.py b/toolchain/mfc/cli/__init__.py new file mode 100644 index 0000000000..d8b5178a5b --- /dev/null +++ b/toolchain/mfc/cli/__init__.py @@ -0,0 +1,37 @@ +""" +CLI Schema and Auto-Generation Module. + +This package provides the single source of truth for MFC's CLI definition, +with generators for argparse, shell completions, and documentation. 
+ +Usage: + from mfc.cli.commands import MFC_CLI_SCHEMA + from mfc.cli.argparse_gen import generate_parser + from mfc.cli.completion_gen import generate_bash_completion, generate_zsh_completion +""" + +from .schema import ( + CLISchema, + Command, + Argument, + Positional, + Example, + CommonArgumentSet, + MutuallyExclusiveGroup, + ArgAction, + CompletionType, + Completion, +) + +__all__ = [ + "CLISchema", + "Command", + "Argument", + "Positional", + "Example", + "CommonArgumentSet", + "MutuallyExclusiveGroup", + "ArgAction", + "CompletionType", + "Completion", +] diff --git a/toolchain/mfc/cli/argparse_gen.py b/toolchain/mfc/cli/argparse_gen.py new file mode 100644 index 0000000000..b95087b83b --- /dev/null +++ b/toolchain/mfc/cli/argparse_gen.py @@ -0,0 +1,226 @@ +""" +Generate argparse parsers from CLI schema. + +This module converts the declarative CLI schema into argparse ArgumentParsers. +""" + +import argparse +import dataclasses +from typing import Dict, Tuple + +from .schema import ( + CLISchema, Command, Argument, Positional, + ArgAction, CommonArgumentSet +) + + +def _action_to_argparse(action: ArgAction) -> str: + """Convert schema action to argparse action string.""" + return action.value + + +def _add_argument(parser: argparse.ArgumentParser, arg: Argument): + """Add a single argument to parser.""" + kwargs: Dict = { + "help": arg.help, + "default": arg.default, + } + + if arg.action != ArgAction.STORE: + kwargs["action"] = _action_to_argparse(arg.action) + else: + if arg.type is not None: + kwargs["type"] = arg.type + if arg.choices is not None: + kwargs["choices"] = arg.choices + if arg.nargs is not None: + # Handle "..." as REMAINDER + if arg.nargs == "...": + kwargs["nargs"] = argparse.REMAINDER + else: + kwargs["nargs"] = arg.nargs + if arg.metavar is not None: + kwargs["metavar"] = arg.metavar + if arg.required: + kwargs["required"] = arg.required + + if arg.dest is not None: + kwargs["dest"] = arg.dest + + if arg.const is not None: + kwargs["const"] = arg.const + + flags = arg.get_flags() + parser.add_argument(*flags, **kwargs) + + +def _add_positional(parser: argparse.ArgumentParser, pos: Positional): + """Add a positional argument to parser.""" + kwargs: Dict = { + "help": pos.help, + "metavar": pos.name.upper(), + } + + if pos.type != str: + kwargs["type"] = pos.type + if pos.nargs is not None: + kwargs["nargs"] = pos.nargs + if pos.default is not None: + kwargs["default"] = pos.default + if pos.choices is not None: + kwargs["choices"] = pos.choices + + parser.add_argument(pos.name, **kwargs) + + +def _add_mfc_config_arguments(parser: argparse.ArgumentParser, config): + """ + Add MFCConfig boolean pair arguments dynamically. + + This handles --mpi/--no-mpi, --gpu/--no-gpu, etc. from MFCConfig dataclass. + """ + # Import here to avoid circular dependency + from ..state import gpuConfigOptions # pylint: disable=import-outside-toplevel + + for f in dataclasses.fields(config): + if f.name == 'gpu': + parser.add_argument( + f"--{f.name}", + action="store", + nargs='?', + const=gpuConfigOptions.ACC.value, + default=gpuConfigOptions.NONE.value, + dest=f.name, + choices=[e.value for e in gpuConfigOptions], + help=f"Turn the {f.name} option to OpenACC or OpenMP." + ) + parser.add_argument( + f"--no-{f.name}", + action="store_const", + const=gpuConfigOptions.NONE.value, + dest=f.name, + help=f"Turn the {f.name} option OFF." + ) + else: + parser.add_argument( + f"--{f.name}", + action="store_true", + help=f"Turn the {f.name} option ON." 
+ ) + parser.add_argument( + f"--no-{f.name}", + action="store_false", + dest=f.name, + help=f"Turn the {f.name} option OFF." + ) + + parser.set_defaults(**{ + f.name: getattr(config, f.name) + for f in dataclasses.fields(config) + }) + + +def _add_common_arguments( + parser: argparse.ArgumentParser, + command: Command, + common_sets: Dict[str, CommonArgumentSet], + config=None +): + """Add common arguments to a command parser.""" + for set_name in command.include_common: + common_set = common_sets.get(set_name) + if common_set is None: + continue + + # Handle MFC config flags specially + if common_set.mfc_config_flags and config is not None: + _add_mfc_config_arguments(parser, config) + else: + for arg in common_set.arguments: + _add_argument(parser, arg) + + +def _add_command_subparser( + subparsers, + cmd: Command, + common_sets: Dict[str, CommonArgumentSet], + config +) -> argparse.ArgumentParser: + """Add a single command's subparser and return it.""" + subparser = subparsers.add_parser( + name=cmd.name, + aliases=cmd.aliases or [], + help=cmd.help, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + # Add positional arguments first + for pos in cmd.positionals: + _add_positional(subparser, pos) + + # Add common arguments + _add_common_arguments(subparser, cmd, common_sets, config) + + # Add command-specific arguments + for arg in cmd.arguments: + _add_argument(subparser, arg) + + # Add mutually exclusive groups + for meg in cmd.mutually_exclusive: + group = subparser.add_mutually_exclusive_group(required=meg.required) + for arg in meg.arguments: + _add_argument(group, arg) + + # Handle subcommands (e.g., packer pack, packer compare) + if cmd.subcommands: + sub_subparsers = subparser.add_subparsers(dest=cmd.name) + for subcmd in cmd.subcommands: + sub_sub = sub_subparsers.add_parser( + name=subcmd.name, + help=subcmd.help, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + for pos in subcmd.positionals: + _add_positional(sub_sub, pos) + for arg in subcmd.arguments: + _add_argument(sub_sub, arg) + + return subparser + + +def generate_parser( + schema: CLISchema, + config=None # MFCConfig instance +) -> Tuple[argparse.ArgumentParser, Dict[str, argparse.ArgumentParser]]: + """ + Generate complete argparse parser from schema. + + Args: + schema: The CLI schema definition + config: Optional MFCConfig for dynamic boolean pairs + + Returns: + Tuple of (main parser, dict mapping command names to subparsers) + """ + parser = argparse.ArgumentParser( + prog=schema.prog, + description=schema.description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + add_help=False, # We handle --help ourselves + ) + + # Add top-level arguments + for arg in schema.arguments: + _add_argument(parser, arg) + + # Build common sets lookup + common_sets = {cs.name: cs for cs in schema.common_sets} + + # Add subparsers for commands + subparsers = parser.add_subparsers(dest="command") + subparser_map: Dict[str, argparse.ArgumentParser] = {} + + for cmd in schema.commands: + subparser_map[cmd.name] = _add_command_subparser(subparsers, cmd, common_sets, config) + + return parser, subparser_map diff --git a/toolchain/mfc/cli/commands.py b/toolchain/mfc/cli/commands.py new file mode 100644 index 0000000000..0f827a4b74 --- /dev/null +++ b/toolchain/mfc/cli/commands.py @@ -0,0 +1,1036 @@ +""" +MFC CLI Command Definitions - SINGLE SOURCE OF TRUTH + +All command definitions live here. 
This file is used to generate: +- argparse parsers +- bash/zsh completions +- user guide help content +- CLI reference documentation + +When adding a new command or option, ONLY modify this file. +Then run `./mfc.sh generate` to update completions. +""" +# pylint: disable=too-many-lines + +from .schema import ( + CLISchema, Command, Argument, Positional, Example, + ArgAction, Completion, CompletionType, + CommonArgumentSet, MutuallyExclusiveGroup +) + + +# ============================================================================= +# CONSTANTS (shared with other modules) +# ============================================================================= + +TARGET_NAMES = [ + 'fftw', 'hdf5', 'silo', 'lapack', 'hipfort', + 'pre_process', 'simulation', 'post_process', + 'syscheck', 'documentation' +] + +DEFAULT_TARGET_NAMES = ['pre_process', 'simulation', 'post_process'] + +TEMPLATE_NAMES = [ + 'bridges2', 'carpenter', 'carpenter-cray', 'default', + 'delta', 'deltaai', 'frontier', 'hipergator', 'nautilus', + 'oscar', 'phoenix', 'phoenix-bench', 'santis', 'tuo' +] + +GPU_OPTIONS = ['acc', 'mp'] + +ENGINE_OPTIONS = ['interactive', 'batch'] + +MPI_BINARIES = ['mpirun', 'jsrun', 'srun', 'mpiexec'] + + +# ============================================================================= +# COMMON ARGUMENT SETS +# ============================================================================= + +COMMON_TARGETS = CommonArgumentSet( + name="targets", + arguments=[ + Argument( + name="targets", + short="t", + help="Space-separated list of targets to act upon.", + nargs="+", + type=str, + default=DEFAULT_TARGET_NAMES, + choices=TARGET_NAMES, + metavar="TARGET", + completion=Completion(type=CompletionType.CHOICES, choices=TARGET_NAMES), + ), + ] +) + +COMMON_JOBS = CommonArgumentSet( + name="jobs", + arguments=[ + Argument( + name="jobs", + short="j", + help="Allows for JOBS concurrent jobs.", + type=int, + default=1, + metavar="JOBS", + ), + ] +) + +COMMON_VERBOSE = CommonArgumentSet( + name="verbose", + arguments=[ + Argument( + name="verbose", + short="v", + help="Increase verbosity level. Use -v, -vv, or -vvv for more detail.", + action=ArgAction.COUNT, + default=0, + ), + ] +) + +COMMON_DEBUG_LOG = CommonArgumentSet( + name="debug_log", + arguments=[ + Argument( + name="debug-log", + short="d", + help="Enable debug logging for troubleshooting.", + action=ArgAction.STORE_TRUE, + dest="debug_log", + ), + ] +) + +COMMON_GPUS = CommonArgumentSet( + name="gpus", + arguments=[ + Argument( + name="gpus", + short="g", + help="(Optional GPU override) List of GPU #s to use (environment default if unspecified).", + nargs="+", + type=int, + default=None, + ), + ] +) + +# MFCConfig flags are handled specially in argparse_gen.py +# This marker tells the generator to add --mpi/--no-mpi, --gpu/--no-gpu, etc. 
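Before the marker set itself (defined next), a minimal sketch of the boolean-pair pattern that `mfc_config_flags` expands to, per `_add_mfc_config_arguments` in `argparse_gen.py`; the option strings `acc`, `mp`, and `none` are assumed to match the `gpuConfigOptions` values:

```python
import argparse

parser = argparse.ArgumentParser(prog="./mfc.sh build", add_help=False)

# --mpi / --no-mpi write to the same destination, so the last flag wins.
parser.add_argument("--mpi", action="store_true", dest="mpi")
parser.add_argument("--no-mpi", action="store_false", dest="mpi")

# --gpu takes an optional value: a bare --gpu selects OpenACC ("acc").
parser.add_argument("--gpu", nargs="?", const="acc", default="none",
                    choices=["acc", "mp", "none"], dest="gpu")
parser.add_argument("--no-gpu", action="store_const", const="none", dest="gpu")
parser.set_defaults(mpi=True)  # defaults come from the MFCConfig instance

print(parser.parse_args([]))               # Namespace(gpu='none', mpi=True)
print(parser.parse_args(["--no-mpi"]))     # Namespace(gpu='none', mpi=False)
print(parser.parse_args(["--gpu"]))        # Namespace(gpu='acc', mpi=True)
print(parser.parse_args(["--gpu", "mp"]))  # Namespace(gpu='mp', mpi=True)
```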
+COMMON_MFC_CONFIG = CommonArgumentSet(
+    name="mfc_config",
+    mfc_config_flags=True,
+    arguments=[],  # Generated dynamically
+)
+
+
+# =============================================================================
+# COMMAND DEFINITIONS
+# =============================================================================
+
+BUILD_COMMAND = Command(
+    name="build",
+    aliases=["b"],
+    help="Build MFC and its dependencies.",
+    description="Build MFC targets with optional GPU support and case optimization.",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log"],
+    arguments=[
+        Argument(
+            name="input",
+            short="i",
+            help="(GPU Optimization) Build a version of MFC optimized for a case.",
+            type=str,
+            default=None,
+            completion=Completion(type=CompletionType.FILES_PY),
+        ),
+        Argument(
+            name="case-optimization",
+            help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded (requires --input).",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="case_optimization",
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh build", "Build all default targets (CPU)"),
+        Example("./mfc.sh build -j 8", "Build with 8 parallel jobs"),
+        Example("./mfc.sh build --gpu", "Build with GPU (OpenACC) support"),
+        Example("./mfc.sh build -i case.py --case-optimization -j 8", "Case optimization (10x faster!)"),
+    ],
+    key_options=[
+        ("-j, --jobs N", "Number of parallel build jobs"),
+        ("-t, --targets", "Targets: pre_process, simulation, post_process"),
+        ("--gpu [acc|mp]", "Enable GPU support (OpenACC or OpenMP)"),
+        ("--case-optimization", "Hard-code case params for 10x speedup"),
+        ("--debug", "Build in debug mode"),
+    ],
+)
+
+RUN_COMMAND = Command(
+    name="run",
+    aliases=["r"],
+    help="Run a case with MFC.",
+    description="Run an MFC simulation case interactively or submit as a batch job.",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log", "gpus"],
+    positionals=[
+        Positional(
+            name="input",
+            help="Input file to run.",
+            completion=Completion(type=CompletionType.FILES_PY),
+        ),
+    ],
+    arguments=[
+        Argument(
+            name="engine",
+            short="e",
+            help="Job execution/submission engine choice.",
+            choices=ENGINE_OPTIONS,
+            default="interactive",
+            completion=Completion(type=CompletionType.CHOICES, choices=ENGINE_OPTIONS),
+        ),
+        Argument(
+            name="partition",
+            short="p",
+            help="(Batch) Partition for job submission.",
+            default="",
+            metavar="PARTITION",
+        ),
+        Argument(
+            name="quality_of_service",
+            short="q",
+            help="(Batch) Quality of Service for job submission.",
+            default="",
+            metavar="QOS",
+        ),
+        Argument(
+            name="nodes",
+            short="N",
+            help="(Batch) Number of nodes.",
+            type=int,
+            default=1,
+            metavar="NODES",
+        ),
+        Argument(
+            name="tasks-per-node",
+            short="n",
+            help="Number of tasks per node.",
+            type=int,
+            default=1,
+            metavar="TASKS",
+            dest="tasks_per_node",
+        ),
+        Argument(
+            name="walltime",
+            short="w",
+            help="(Batch) Walltime.",
+            default="01:00:00",
+            metavar="WALLTIME",
+        ),
+        Argument(
+            name="account",
+            short="a",
+            help="(Batch) Account to charge.",
+            default="",
+            metavar="ACCOUNT",
+        ),
+        Argument(
+            name="email",
+            short="@",
+            help="(Batch) Email for job notification.",
+            default="",
+            metavar="EMAIL",
+        ),
+        Argument(
+            name="name",
+            short="#",
+            help="(Batch) Job name.",
+            default="MFC",
+            metavar="NAME",
+        ),
+        Argument(
+            name="scratch",
+            short="s",
+            help="Build from scratch.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="binary",
+            short="b",
+            help="(Interactive) Override MPI execution binary.",
+            choices=MPI_BINARIES,
+            default=None,
+            completion=Completion(type=CompletionType.CHOICES, choices=MPI_BINARIES),
+        ),
+        Argument(
+            name="dry-run",
+            help="(Batch) Run without submitting batch file.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="dry_run",
+        ),
+        Argument(
+            name="case-optimization",
+            help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="case_optimization",
+        ),
+        Argument(
+            name="no-build",
+            help="(Testing) Do not rebuild MFC.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="no_build",
+        ),
+        Argument(
+            name="wait",
+            help="(Batch) Wait for the job to finish.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="computer",
+            short="c",
+            help="(Batch) Path to a custom submission file template or one of the built-in templates.",
+            default="default",
+            metavar="COMPUTER",
+            completion=Completion(type=CompletionType.CHOICES, choices=TEMPLATE_NAMES),
+        ),
+        Argument(
+            name="output-summary",
+            short="o",
+            help="Output file (YAML) for summary.",
+            default=None,
+            metavar="OUTPUT",
+            dest="output_summary",
+        ),
+        Argument(
+            name="clean",
+            help="Clean the case before running.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        # Profiler arguments with REMAINDER
+        Argument(
+            name="ncu",
+            help="Profile with NVIDIA Nsight Compute.",
+            nargs="...",  # REMAINDER
+            type=str,
+        ),
+        Argument(
+            name="nsys",
+            help="Profile with NVIDIA Nsight Systems.",
+            nargs="...",  # REMAINDER
+            type=str,
+        ),
+        Argument(
+            name="rcu",
+            help="Profile with ROCM rocprof-compute.",
+            nargs="...",  # REMAINDER
+            type=str,
+        ),
+        Argument(
+            name="rsys",
+            help="Profile with ROCM rocprof-systems.",
+            nargs="...",  # REMAINDER
+            type=str,
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh run case.py", "Run interactively with 1 rank"),
+        Example("./mfc.sh run case.py -n 4", "Run with 4 MPI ranks"),
+        Example("./mfc.sh run case.py --case-optimization -j 8", "10x faster with case optimization!"),
+        Example("./mfc.sh run case.py -e batch -N 2 -n 4", "Submit batch job: 2 nodes, 4 ranks/node"),
+    ],
+    key_options=[
+        ("--case-optimization", "Hard-code params for 10x speedup!"),
+        ("-n, --tasks-per-node", "MPI ranks per node"),
+        ("-N, --nodes", "Number of nodes (batch)"),
+        ("-e, --engine", "interactive or batch"),
+        ("-a, --account", "Account to charge (batch)"),
+        ("-w, --walltime", "Wall time limit (batch)"),
+    ],
+)
+
+TEST_COMMAND = Command(
+    name="test",
+    aliases=["t"],
+    help="Run MFC's test suite.",
+    description="Run MFC's test suite with various filtering and generation options.",
+    include_common=["mfc_config", "jobs", "verbose", "debug_log", "gpus"],
+    # Note: does NOT include "targets" - test uses different target handling
+    arguments=[
+        Argument(
+            name="list",
+            short="l",
+            help="List all available tests.",
+            action=ArgAction.STORE_TRUE,
+        ),
+        Argument(
+            name="from",
+            short="f",
+            help="First test UUID to run.",
+            default=None,
+            type=str,
+        ),
+        Argument(
+            name="to",
+            short="t",
+            help="Last test UUID to run.",
+            default=None,
+            type=str,
+        ),
+        Argument(
+            name="only",
+            short="o",
+            help="Only run tests with specified properties.",
+            nargs="+",
+            type=str,
+            default=[],
+            metavar="L",
+        ),
+        Argument(
+            name="test-all",
+            short="a",
+            help="Run the Post Process Tests too.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="test_all",
+        ),
+        Argument(
+            name="percent",
+            short="%",
+            help="Percentage of tests to run.",
+            type=int,
+            default=100,
+        ),
+        Argument(
+            name="max-attempts",
+            short="m",
+            help="Maximum number of attempts to run a test.",
+            type=int,
+            default=1,
+            dest="max_attempts",
+        ),
+        Argument(
+            name="rdma-mpi",
+            help="Run tests with RDMA MPI enabled.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="rdma_mpi",
+        ),
+        Argument(
+            name="no-build",
+            help="(Testing) Do not rebuild MFC.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="no_build",
+        ),
+        Argument(
+            name="no-examples",
+            help="Do not test example cases.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="no_examples",
+        ),
+        Argument(
+            name="case-optimization",
+            help="(GPU Optimization) Compile MFC targets with some case parameters hard-coded.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="case_optimization",
+        ),
+        Argument(
+            name="dry-run",
+            help="Build and generate case files but do not run tests.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="dry_run",
+        ),
+    ],
+    mutually_exclusive=[
+        MutuallyExclusiveGroup(arguments=[
+            Argument(
+                name="generate",
+                help="(Test Generation) Generate golden files.",
+                action=ArgAction.STORE_TRUE,
+                default=False,
+            ),
+            Argument(
+                name="add-new-variables",
+                help="(Test Generation) If new variables are found in D/ when running tests, add them to the golden files.",
+                action=ArgAction.STORE_TRUE,
+                default=False,
+                dest="add_new_variables",
+            ),
+            Argument(
+                name="remove-old-tests",
+                help="(Test Generation) Delete test directories that are no longer needed.",
+                action=ArgAction.STORE_TRUE,
+                default=False,
+                dest="remove_old_tests",
+            ),
+        ]),
+    ],
+    examples=[
+        Example("./mfc.sh test", "Run all tests"),
+        Example("./mfc.sh test -j 4", "Run with 4 parallel jobs"),
+        Example("./mfc.sh test --only 3D", "Run only 3D tests"),
+        Example("./mfc.sh test --generate", "Regenerate golden files"),
+    ],
+    key_options=[
+        ("-j, --jobs N", "Number of parallel test jobs"),
+        ("-o, --only PROP", "Run tests matching property"),
+        ("-f, --from UUID", "Start from specific test"),
+        ("--generate", "Generate/update golden files"),
+        ("--no-build", "Skip rebuilding MFC"),
+    ],
+)
+
+CLEAN_COMMAND = Command(
+    name="clean",
+    aliases=["c"],
+    help="Clean build artifacts.",
+    description="Remove build artifacts and cache files.",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log"],
+    examples=[
+        Example("./mfc.sh clean", "Clean all build files"),
+    ],
+    key_options=[],
+)
+
+VALIDATE_COMMAND = Command(
+    name="validate",
+    aliases=["v"],
+    help="Validate a case file without running.",
+    description="Check a case file for syntax errors and constraint violations.",
+    include_common=["debug_log"],  # Only debug-log from common
+    positionals=[
+        Positional(
+            name="input",
+            help="Path to case file to validate.",
+            completion=Completion(type=CompletionType.FILES_PY),
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh validate case.py", "Check syntax and constraints"),
+        Example("./mfc.sh validate case.py -d", "Validate with debug output"),
+    ],
+    key_options=[
+        ("-d, --debug-log", "Enable debug logging"),
+    ],
+)
+
+NEW_COMMAND = Command(
+    name="new",
+    help="Create a new case from a template.",
+    description="Create a new simulation case directory from a built-in or example template.",
+    positionals=[
+        Positional(
+            name="name",
+            help="Name/path for the new case directory.",
+            nargs="?",
+            default=None,
+            completion=Completion(type=CompletionType.DIRECTORIES),
+        ),
+    ],
+    arguments=[
+        Argument(
+            name="template",
+            short="t",
+            help="Template to use (e.g., 1D_minimal, 2D_minimal, 3D_minimal, or example:<name>).",
+            default="1D_minimal",
+            completion=Completion(
+                type=CompletionType.CHOICES,
+                choices=["1D_minimal", "2D_minimal", "3D_minimal"],
+            ),
+        ),
+        Argument(
+            name="list",
+            short="l",
+            help="List available templates.",
+            action=ArgAction.STORE_TRUE,
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh new my_case", "Create with 1D_minimal template"),
+        Example("./mfc.sh new my_case -t 2D_minimal", "Create with 2D template"),
+        Example("./mfc.sh new my_case -t example:3D_sphbubcollapse", "Copy from example"),
+        Example("./mfc.sh new --list", "List available templates"),
+    ],
+    key_options=[
+        ("-t, --template NAME", "Template: 1D_minimal, 2D_minimal, 3D_minimal"),
+        ("-l, --list", "List all available templates"),
+    ],
+)
+
+PACKER_COMMAND = Command(
+    name="packer",
+    help="Packer utility (pack/unpack/compare).",
+    description="Pack simulation output into a single file or compare packed files.",
+    subcommands=[
+        Command(
+            name="pack",
+            help="Pack a case into a single file.",
+            positionals=[
+                Positional(
+                    name="input",
+                    help="Input file of case to pack.",
+                    completion=Completion(type=CompletionType.FILES_PY),
+                ),
+            ],
+            arguments=[
+                Argument(
+                    name="output",
+                    short="o",
+                    help="Base name of output file.",
+                    default=None,
+                    metavar="OUTPUT",
+                ),
+            ],
+        ),
+        Command(
+            name="compare",
+            help="Compare two cases.",
+            positionals=[
+                Positional(
+                    name="input1",
+                    help="First pack file.",
+                    completion=Completion(type=CompletionType.FILES_PACK),
+                ),
+                Positional(
+                    name="input2",
+                    help="Second pack file.",
+                    completion=Completion(type=CompletionType.FILES_PACK),
+                ),
+            ],
+            arguments=[
+                Argument(
+                    name="reltol",
+                    short="rel",
+                    help="Relative tolerance.",
+                    type=float,
+                    default=1e-12,
+                    metavar="RELTOL",
+                ),
+                Argument(
+                    name="abstol",
+                    short="abs",
+                    help="Absolute tolerance.",
+                    type=float,
+                    default=1e-12,
+                    metavar="ABSTOL",
+                ),
+            ],
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh packer pack case.py", "Pack case output"),
+        Example("./mfc.sh packer compare a.pack b.pack", "Compare two packed files"),
+    ],
+    key_options=[],
+)
+
+COMPLETION_COMMAND = Command(
+    name="completion",
+    help="Install shell tab-completion.",
+    description="Install or manage shell tab-completion for MFC commands.",
+    positionals=[
+        Positional(
+            name="completion_action",
+            help="Action: install, uninstall, or status",
+            nargs="?",
+            default=None,
+            choices=["install", "uninstall", "status"],
+            completion=Completion(
+                type=CompletionType.CHOICES,
+                choices=["install", "uninstall", "status"],
+            ),
+        ),
+        Positional(
+            name="completion_shell",
+            help="Shell type: bash or zsh (auto-detected if not specified)",
+            nargs="?",
+            default=None,
+            choices=["bash", "zsh"],
+            completion=Completion(
+                type=CompletionType.CHOICES,
+                choices=["bash", "zsh"],
+            ),
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh completion install", "Install tab completion for current shell"),
+        Example("./mfc.sh completion install bash", "Install for bash specifically"),
+        Example("./mfc.sh completion status", "Check completion installation status"),
+    ],
+    key_options=[
+        ("install", "Install completion scripts"),
+        ("uninstall", "Remove completion scripts"),
+        ("status", "Check installation status"),
+    ],
+)
+
+HELP_COMMAND = Command(
+    name="help",
+    help="Show help on a topic.",
+    positionals=[
+        Positional(
+            name="topic",
+            help="Help topic: gpu, clusters, batch, debugging, performance",
+            nargs="?",
+            default=None,
+            choices=["gpu", "clusters", "batch", "debugging", "performance"],
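+            # NOTE: keep these choices in sync with the keys of HELP_TOPICS
+            # below; every topic offered here needs a matching entry there.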
+            completion=Completion(
+                type=CompletionType.CHOICES,
+                choices=["gpu", "clusters", "batch", "debugging", "performance"],
+            ),
+        ),
+    ],
+)
+
+# Simple commands (shell scripts, minimal arguments)
+LOAD_COMMAND = Command(
+    name="load",
+    help="Loads the MFC environment with source.",
+    description="Load MFC environment modules (use with source).",
+    examples=[
+        Example("source ./mfc.sh load -c p -m g", "Load Phoenix GPU modules"),
+        Example("source ./mfc.sh load -c f -m c", "Load Frontier CPU modules"),
+    ],
+    key_options=[
+        ("-c CLUSTER", "Cluster: p(hoenix), f(rontier), a(ndes), etc."),
+        ("-m MODE", "Mode: c(pu), g(pu)"),
+    ],
+)
+
+LINT_COMMAND = Command(
+    name="lint",
+    help="Lints and tests all toolchain code.",
+    description="Run pylint and unit tests on MFC's toolchain Python code.",
+    arguments=[
+        Argument(
+            name="no-test",
+            help="Skip running unit tests (only run pylint).",
+            action=ArgAction.STORE_TRUE,
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh lint", "Run pylint and unit tests"),
+        Example("./mfc.sh lint --no-test", "Run only pylint (skip unit tests)"),
+    ],
+    key_options=[
+        ("--no-test", "Skip unit tests, only run pylint"),
+    ],
+)
+
+FORMAT_COMMAND = Command(
+    name="format",
+    help="Formats all code after editing.",
+    description="Format all code in the repository.",
+    examples=[
+        Example("./mfc.sh format", "Format all code"),
+        Example("./mfc.sh format -j 8", "Format with 8 parallel jobs"),
+    ],
+    key_options=[
+        ("-j, --jobs N", "Number of parallel formatting jobs"),
+    ],
+)
+
+SPELLING_COMMAND = Command(
+    name="spelling",
+    help="Runs the spell checker after editing.",
+    description="Check spelling in the codebase.",
+    examples=[
+        Example("./mfc.sh spelling", "Run spell checker"),
+    ],
+)
+
+INTERACTIVE_COMMAND = Command(
+    name="interactive",
+    help="Launch interactive menu-driven interface.",
+    description="Launch an interactive menu for MFC operations.",
+)
+
+GENERATE_COMMAND = Command(
+    name="generate",
+    help="Regenerate completion scripts from CLI schema.",
+    description="Regenerate shell completion scripts, documentation, and JSON schema from the CLI schema.",
+    arguments=[
+        Argument(
+            name="check",
+            help="Check if generated files are up to date (exit 1 if not).",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="json-schema",
+            help="Generate JSON Schema for IDE auto-completion of case files.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="json_schema",
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh generate", "Regenerate completion scripts"),
+        Example("./mfc.sh generate --check", "Check if completions are up to date"),
+        Example("./mfc.sh generate --json-schema", "Generate JSON Schema for IDE support"),
+    ],
+)
+
+# Bench commands (CI-specific)
+BENCH_COMMAND = Command(
+    name="bench",
+    help="Benchmark MFC (for CI).",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log", "gpus"],
+    arguments=[
+        Argument(
+            name="output",
+            short="o",
+            help="Path to the YAML output file to write the results to.",
+            required=True,
+            metavar="OUTPUT",
+        ),
+        Argument(
+            name="mem",
+            short="m",
+            help="Memory per task for benchmarking cases.",
+            type=int,
+            default=1,
+            metavar="MEM",
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh bench -o results.yaml", "Run benchmarks and save results"),
+    ],
+    key_options=[
+        ("-o, --output FILE", "Output file for benchmark results (required)"),
+        ("-m, --mem SIZE", "Memory limit for benchmarks"),
+    ],
+)
+
+BENCH_DIFF_COMMAND = Command(
+    name="bench_diff",
+    help="Compare MFC Benchmarks (for CI).",
+    include_common=["mfc_config", "jobs", "verbose", "debug_log"],
+    # Note: does NOT include "targets"
+    positionals=[
+        Positional(name="lhs", help="Path to a benchmark result YAML file."),
+        Positional(name="rhs", help="Path to a benchmark result YAML file."),
+    ],
+)
+
+COUNT_COMMAND = Command(
+    name="count",
+    help="Count LOC in MFC.",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log"],
+    examples=[
+        Example("./mfc.sh count", "Show LOC statistics"),
+    ],
+)
+
+COUNT_DIFF_COMMAND = Command(
+    name="count_diff",
+    help="Compare LOC between branches.",
+    include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log"],
+)
+
+PARAMS_COMMAND = Command(
+    name="params",
+    help="Search and explore MFC case parameters.",
+    description="Search, list, and get information about MFC's ~3,300 case parameters.",
+    positionals=[
+        Positional(
+            name="query",
+            help="Search query (parameter name or pattern to search for).",
+            nargs="?",
+            default=None,
+        ),
+    ],
+    arguments=[
+        Argument(
+            name="type",
+            short="t",
+            help="Filter by type: int, real, log, str.",
+            choices=["int", "real", "log", "str"],
+            default=None,
+            dest="param_type",
+        ),
+        Argument(
+            name="families",
+            short="f",
+            help="List parameter families (grouped by prefix).",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="features",
+            short="F",
+            help="List feature groups (mhd, bubbles, weno, etc.).",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="feature",
+            help="Show all parameters for a feature group (e.g., --feature mhd).",
+            type=str,
+            default=None,
+            metavar="NAME",
+        ),
+        Argument(
+            name="names-only",
+            help="Only search parameter names (not descriptions).",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+            dest="names_only",
+        ),
+        Argument(
+            name="count",
+            short="c",
+            help="Show count statistics only.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+        Argument(
+            name="limit",
+            short="n",
+            help="Maximum number of results to show (default: 10000, effectively unlimited).",
+            type=int,
+            default=10000,
+        ),
+        Argument(
+            name="describe",
+            short="d",
+            help="Show parameter descriptions.",
+            action=ArgAction.STORE_TRUE,
+            default=False,
+        ),
+    ],
+    examples=[
+        Example("./mfc.sh params weno", "Search for parameters (names + descriptions)"),
+        Example("./mfc.sh params magnetic", "Find params mentioning 'magnetic'"),
+        Example("./mfc.sh params --feature mhd", "Show all MHD-related parameters"),
+        Example("./mfc.sh params -F", "List all feature groups"),
+        Example("./mfc.sh params -f", "List parameter families (by prefix)"),
+        Example("./mfc.sh params -c", "Show parameter count statistics"),
+    ],
+    key_options=[
+        ("--feature NAME", "Show params for a feature (mhd, bubbles, weno, etc.)"),
+        ("-F, --features", "List all feature groups"),
+        ("-f, --families", "List parameter families (by prefix)"),
+        ("-t, --type", "Filter by type (int, real, log, str)"),
+        ("--names-only", "Only search names (skip descriptions)"),
+    ],
+)
+
+
+# =============================================================================
+# HELP TOPICS
+# =============================================================================
+
+HELP_TOPICS = {
+    "gpu": {
+        "title": "GPU Configuration",
+        "description": "How to configure GPU builds and runs",
+    },
+    "clusters": {
+        "title": "Cluster Configuration",
+        "description": "How to configure MFC for different HPC clusters",
+    },
+    "batch": {
+        "title": "Batch Job Submission",
+        "description": "How to submit batch jobs with MFC",
+    },
+    "debugging": {
+        "title": "Debugging & Troubleshooting",
+        "description": "Tips for debugging MFC issues",
+    },
+    "performance": {
+        "title": "Performance Tuning",
+        "description": "How to get the best performance out of MFC",
+    },
+}
+
+
+# =============================================================================
+# COMPLETE CLI SCHEMA
+# =============================================================================
+
+MFC_CLI_SCHEMA = CLISchema(
+    prog="./mfc.sh",
+    description="""\
+Welcome to the MFC master script. This tool automates and manages building, testing, \
+running, and cleaning of MFC in various configurations on all supported platforms. \
+The README documents this tool and its various commands in more detail. To get \
+started, run `./mfc.sh build -h`.""",
+
+    arguments=[
+        Argument(
+            name="help",
+            short="h",
+            help="Show help message",
+            action=ArgAction.STORE_TRUE,
+        ),
+    ],
+
+    commands=[
+        BUILD_COMMAND,
+        RUN_COMMAND,
+        TEST_COMMAND,
+        CLEAN_COMMAND,
+        VALIDATE_COMMAND,
+        NEW_COMMAND,
+        PARAMS_COMMAND,
+        PACKER_COMMAND,
+        COMPLETION_COMMAND,
+        HELP_COMMAND,
+        GENERATE_COMMAND,
+        LOAD_COMMAND,
+        LINT_COMMAND,
+        FORMAT_COMMAND,
+        SPELLING_COMMAND,
+        INTERACTIVE_COMMAND,
+        BENCH_COMMAND,
+        BENCH_DIFF_COMMAND,
+        COUNT_COMMAND,
+        COUNT_DIFF_COMMAND,
+    ],
+
+    common_sets=[
+        COMMON_TARGETS,
+        COMMON_JOBS,
+        COMMON_VERBOSE,
+        COMMON_DEBUG_LOG,
+        COMMON_GPUS,
+        COMMON_MFC_CONFIG,
+    ],
+
+    help_topics=HELP_TOPICS,
+)
+
+
+# =============================================================================
+# DERIVED DATA (for use by other modules)
+# =============================================================================
+
+# Command aliases mapping (replaces COMMAND_ALIASES in user_guide.py)
+COMMAND_ALIASES = {}
+for cmd in MFC_CLI_SCHEMA.commands:
+    for alias in cmd.aliases:
+        COMMAND_ALIASES[alias] = cmd.name
+
+# Commands dict (replaces COMMANDS in user_guide.py)
+def get_commands_dict():
+    """Generate COMMANDS dict from schema for user_guide.py compatibility."""
+    return {
+        cmd.name: {
+            "description": cmd.description or cmd.help,
+            "alias": cmd.aliases[0] if cmd.aliases else None,
+            "examples": [(e.command, e.description) for e in cmd.examples],
+            "key_options": list(cmd.key_options),
+        }
+        for cmd in MFC_CLI_SCHEMA.commands
+    }
+
+COMMANDS = get_commands_dict()
diff --git a/toolchain/mfc/cli/completion_gen.py b/toolchain/mfc/cli/completion_gen.py
new file mode 100644
index 0000000000..d34d7d9cdc
--- /dev/null
+++ b/toolchain/mfc/cli/completion_gen.py
@@ -0,0 +1,415 @@
+"""
+Generate shell completion scripts from CLI schema.
+
+This module generates bash and zsh completion scripts that are always
+in sync with the CLI schema definitions.
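+
+A minimal usage sketch (assuming the package layout introduced in this PR):
+
+    from mfc.cli.commands import MFC_CLI_SCHEMA
+    from mfc.cli.completion_gen import generate_bash_completion, generate_zsh_completion
+
+    bash_script = generate_bash_completion(MFC_CLI_SCHEMA)  # full script, one string
+    zsh_script = generate_zsh_completion(MFC_CLI_SCHEMA)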
+""" + +from typing import List, Set +from .schema import CLISchema, Command, CompletionType + + +# Mapping of completion types to bash completion expressions +_BASH_COMPLETION_MAP = { + CompletionType.FILES_PY: 'COMPREPLY=( $(compgen -f -X "!*.py" -- "${cur}") $(compgen -d -- "${cur}") )', + CompletionType.FILES_PACK: 'COMPREPLY=( $(compgen -f -X "!*.pack" -- "${cur}") $(compgen -d -- "${cur}") )', + CompletionType.FILES_YAML: 'COMPREPLY=( $(compgen -f -X "!*.y*ml" -- "${cur}") $(compgen -d -- "${cur}") )', + CompletionType.FILES: 'COMPREPLY=( $(compgen -f -- "${cur}") $(compgen -d -- "${cur}") )', + CompletionType.DIRECTORIES: 'COMPREPLY=( $(compgen -d -- "${cur}") )', +} + + +def _collect_all_options(cmd: Command, schema: CLISchema) -> List[str]: + """Collect all option flags for a command including common sets.""" + options: Set[str] = set() + + # Common arguments + for set_name in cmd.include_common: + common_set = schema.get_common_set(set_name) + if common_set is None: + continue + + # MFC config flags + if common_set.mfc_config_flags: + options.update([ + "--mpi", "--no-mpi", + "--gpu", "--no-gpu", + "--debug", "--no-debug", + "--gcov", "--no-gcov", + "--unified", "--no-unified", + "--single", "--no-single", + "--mixed", "--no-mixed", + "--fastmath", "--no-fastmath", + ]) + else: + for arg in common_set.arguments: + if arg.short: + options.add(f"-{arg.short}") + options.add(f"--{arg.name}") + + # Command-specific arguments + for arg in cmd.arguments: + if arg.short: + options.add(f"-{arg.short}") + options.add(f"--{arg.name}") + + # Mutually exclusive arguments + for meg in cmd.mutually_exclusive: + for arg in meg.arguments: + if arg.short: + options.add(f"-{arg.short}") + options.add(f"--{arg.name}") + + return sorted(options) + + +def _bash_completion_for_type(comp_type: CompletionType, choices: List[str] = None) -> str: + """Generate bash completion expression for a completion type.""" + if comp_type == CompletionType.CHOICES and choices: + return f'COMPREPLY=( $(compgen -W "{" ".join(choices)}" -- "${{cur}}") )' + return _BASH_COMPLETION_MAP.get(comp_type, "") + + +def _collect_all_args(cmd: Command, schema: CLISchema) -> list: + """Collect all arguments for a command including common sets.""" + all_args = list(cmd.arguments) + for meg in cmd.mutually_exclusive: + all_args.extend(meg.arguments) + for set_name in cmd.include_common: + common_set = schema.get_common_set(set_name) + if common_set and not common_set.mfc_config_flags: + all_args.extend(common_set.arguments) + return all_args + + +def _generate_bash_prev_cases(cmd: Command, schema: CLISchema) -> List[str]: + """Generate bash prev-based completion cases for a command.""" + lines = [] + has_prev_cases = False + completable_types = (CompletionType.CHOICES, CompletionType.FILES_PY, + CompletionType.FILES_PACK, CompletionType.FILES, + CompletionType.DIRECTORIES, CompletionType.FILES_YAML) + + all_args = _collect_all_args(cmd, schema) + + # First, handle multi-value arguments (nargs="+" or "*") + # These need backward scanning through COMP_WORDS + multivalue_args = [] + for arg in all_args: + if arg.completion.type not in completable_types: + continue + if arg.nargs in ("+", "*"): + multivalue_args.append(arg) + + if multivalue_args: + # Generate backward-scanning logic for multi-value args + lines.append(' # Check for multi-value arguments by scanning backwards') + lines.append(' local i') + lines.append(' for ((i=COMP_CWORD-1; i>=2; i--)); do') + lines.append(' case "${COMP_WORDS[i]}" in') + + for arg in multivalue_args: 
+            flags = [f'-{arg.short}'] if arg.short else []
+            flags.append(f'--{arg.name}')
+            lines.append(f'                    {"|".join(flags)})')
+            comp_choices = arg.completion.choices or arg.choices
+            completion_code = _bash_completion_for_type(arg.completion.type, comp_choices)
+            if completion_code:
+                lines.append(f'                        {completion_code}')
+            lines.append('                        return 0')
+            lines.append('                    ;;')
+
+        # Stop scanning if we hit any other flag
+        lines.append('                    -*)')
+        lines.append('                        break')
+        lines.append('                        ;;')
+        lines.append('                esac')
+        lines.append('            done')
+        lines.append('')
+
+    # Then handle single-value arguments with prev-based completion
+    for arg in all_args:
+        if arg.completion.type not in completable_types:
+            continue
+        # Skip multi-value args as they're handled above
+        if arg.nargs in ("+", "*"):
+            continue
+
+        if not has_prev_cases:
+            lines.append('            case "${prev}" in')
+            has_prev_cases = True
+
+        flags = [f'-{arg.short}'] if arg.short else []
+        flags.append(f'--{arg.name}')
+
+        lines.append(f'                {"|".join(flags)})')
+        comp_choices = arg.completion.choices or arg.choices
+        completion_code = _bash_completion_for_type(arg.completion.type, comp_choices)
+        if completion_code:
+            lines.append(f'                    {completion_code}')
+        lines.append('                    return 0')
+        lines.append('                    ;;')
+
+    if has_prev_cases:
+        lines.append('            esac')
+
+    return lines
+
+
+def _generate_bash_command_case(cmd: Command, schema: CLISchema) -> List[str]:
+    """Generate bash completion case for a single command."""
+    lines = []
+
+    # Include aliases in case pattern
+    patterns = [cmd.name] + cmd.aliases
+    lines.append(f'        {"|".join(patterns)})')
+
+    options = _collect_all_options(cmd, schema)
+
+    # Handle subcommands (like packer pack, packer compare)
+    if cmd.subcommands:
+        lines.append('            if [[ ${COMP_CWORD} -eq 2 ]]; then')
+        subcmd_names = [sc.name for sc in cmd.subcommands]
+        lines.append(f'                COMPREPLY=( $(compgen -W "{" ".join(subcmd_names)}" -- "${{cur}}") )')
+        lines.append('                return 0')
+        lines.append('            fi')
+        lines.append('            ;;')
+        return lines
+
+    # Generate prev-based completion
+    lines.extend(_generate_bash_prev_cases(cmd, schema))
+
+    # Default completion - options or positional files
+    if options:
+        lines.append(f'            local opts="{" ".join(options)}"')
+        lines.append('            if [[ "${cur}" == -* ]]; then')
+        lines.append('                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )')
+
+        if cmd.positionals and cmd.positionals[0].completion.type != CompletionType.NONE:
+            lines.append('            else')
+            pos = cmd.positionals[0]
+            comp_choices = pos.completion.choices or pos.choices
+            completion_code = _bash_completion_for_type(pos.completion.type, comp_choices)
+            if completion_code:
+                lines.append(f'                {completion_code}')
+
+        lines.append('            fi')
+    elif cmd.positionals and cmd.positionals[0].completion.type != CompletionType.NONE:
+        pos = cmd.positionals[0]
+        comp_choices = pos.completion.choices or pos.choices
+        completion_code = _bash_completion_for_type(pos.completion.type, comp_choices)
+        if completion_code:
+            lines.append(f'            {completion_code}')
+
+    lines.append('            return 0')
+    lines.append('            ;;')
+    return lines
+
+
+def generate_bash_completion(schema: CLISchema) -> str:
+    """Generate bash completion script from schema."""
+    commands = schema.get_all_command_names()
+
+    lines = [
+        '#!/usr/bin/env bash',
+        '# AUTO-GENERATED from cli/commands.py - Do not edit manually',
+        '# Regenerate with: ./mfc.sh generate',
+        '',
+        '_mfc_completions() {',
+        '    local cur prev command',
+        '    COMPREPLY=()',
+        '    cur="${COMP_WORDS[COMP_CWORD]}"',
+        '    prev="${COMP_WORDS[COMP_CWORD-1]}"',
+        '',
+        f'    local commands="{" ".join(sorted(commands))}"',
+        '',
+        '    # First argument - complete commands',
+        '    if [[ ${COMP_CWORD} -eq 1 ]]; then',
+        '        COMPREPLY=( $(compgen -W "${commands}" -- "${cur}") )',
+        '        return 0',
+        '    fi',
+        '',
+        '    local command="${COMP_WORDS[1]}"',
+        '',
+        '    case "${command}" in',
+    ]
+
+    for cmd in schema.commands:
+        if not cmd.arguments and not cmd.positionals and not cmd.include_common and not cmd.subcommands:
+            continue
+        lines.extend(_generate_bash_command_case(cmd, schema))
+
+    lines.extend([
+        '    esac',
+        '',
+        '    return 0',
+        '}',
+        '',
+        'complete -o filenames -o bashdefault -F _mfc_completions ./mfc.sh',
+        'complete -o filenames -o bashdefault -F _mfc_completions mfc.sh',
+        'complete -o filenames -o bashdefault -F _mfc_completions mfc',
+    ])
+
+    return '\n'.join(lines)
+
+
+def _zsh_completion_for_positional(pos, index: int) -> str:
+    """Generate zsh completion spec for a positional argument."""
+    completion = ""
+    if pos.completion.type == CompletionType.FILES_PY:
+        completion = ':_files -g "*.py"'
+    elif pos.completion.type == CompletionType.FILES_PACK:
+        completion = ':_files -g "*.pack"'
+    elif pos.completion.type == CompletionType.CHOICES:
+        choices = pos.completion.choices or pos.choices or []
+        completion = f':({" ".join(choices)})'
+    elif pos.completion.type == CompletionType.DIRECTORIES:
+        completion = ':_files -/'
+    elif pos.completion.type == CompletionType.FILES:
+        completion = ':_files'
+
+    help_text = pos.help.replace("'", "").replace("[", "").replace("]", "")[:120]
+    return f"'{index}:{help_text}{completion}'"
+
+
+def _zsh_completion_for_arg(arg) -> str:
+    """Generate zsh completion suffix for an argument."""
+    # For multi-value args (nargs="+" or "*"), add a label before the choices
+    is_multivalue = arg.nargs in ("+", "*")
+    label = ":value" if is_multivalue else ""
+
+    if arg.completion.type == CompletionType.CHOICES:
+        choices = arg.completion.choices or arg.choices or []
+        return f'{label}:({" ".join(str(c) for c in choices)})'
+    if arg.completion.type == CompletionType.FILES_PY:
+        return f'{label}:_files -g "*.py"'
+    if arg.completion.type == CompletionType.FILES_PACK:
+        return f'{label}:_files -g "*.pack"'
+    if arg.completion.type == CompletionType.FILES:
+        return f'{label}:_files'
+    if arg.completion.type == CompletionType.DIRECTORIES:
+        return f'{label}:_files -/'
+    return ""
+
+
+def _zsh_arg_prefix(arg) -> str:
+    """Return '*' prefix for multi-value args, empty string otherwise."""
+    return "*" if arg.nargs in ("+", "*") else ""
+
+
+def _generate_zsh_command_args(cmd: Command, schema: CLISchema) -> List[str]:
+    """Generate zsh argument lines for a command."""
+    arg_lines = []
+
+    # Positionals
+    for i, pos in enumerate(cmd.positionals):
+        arg_lines.append(_zsh_completion_for_positional(pos, i + 1))
+
+    # Options from common sets
+    for set_name in cmd.include_common:
+        common_set = schema.get_common_set(set_name)
+        if common_set is None:
+            continue
+
+        if common_set.mfc_config_flags:
+            arg_lines.extend([
+                "'--mpi[Enable MPI]'",
+                "'--no-mpi[Disable MPI]'",
+                "'--gpu[Enable GPU]:mode:(acc mp)'",
+                "'--no-gpu[Disable GPU]'",
+                "'--debug[Enable debug mode]'",
+                "'--no-debug[Disable debug mode]'",
+                "'--gcov[Enable gcov coverage]'",
+                "'--no-gcov[Disable gcov coverage]'",
+                "'--unified[Enable unified memory]'",
+                "'--no-unified[Disable unified memory]'",
+                "'--single[Enable single precision]'",
+                "'--no-single[Disable single precision]'",
+                "'--mixed[Enable mixed precision]'",
+                "'--no-mixed[Disable mixed precision]'",
+                "'--fastmath[Enable fast math]'",
+                "'--no-fastmath[Disable fast math]'",
+            ])
+        else:
+            for arg in common_set.arguments:
+                desc = arg.help.replace("'", "").replace("[", "").replace("]", "")[:120]
+                completion = _zsh_completion_for_arg(arg)
+                prefix = _zsh_arg_prefix(arg)
+                if arg.short:
+                    arg_lines.append(f"'{prefix}-{arg.short}[{desc}]{completion}'")
+                arg_lines.append(f"'{prefix}--{arg.name}[{desc}]{completion}'")
+
+    # Command-specific arguments
+    all_args = list(cmd.arguments)
+    for meg in cmd.mutually_exclusive:
+        all_args.extend(meg.arguments)
+
+    for arg in all_args:
+        desc = arg.help.replace("'", "").replace("[", "").replace("]", "")[:120]
+        completion = _zsh_completion_for_arg(arg)
+        prefix = _zsh_arg_prefix(arg)
+        if arg.short:
+            arg_lines.append(f"'{prefix}-{arg.short}[{desc}]{completion}'")
+        arg_lines.append(f"'{prefix}--{arg.name}[{desc}]{completion}'")
+
+    return arg_lines
+
+
+def generate_zsh_completion(schema: CLISchema) -> str:
+    """Generate zsh completion script from schema."""
+    lines = [
+        '#compdef mfc.sh ./mfc.sh mfc',
+        '# AUTO-GENERATED from cli/commands.py - Do not edit manually',
+        '# Regenerate with: ./mfc.sh generate',
+        '',
+        '_mfc() {',
+        '    local context state state_descr line',
+        '    typeset -A opt_args',
+        '',
+        '    local -a commands',
+        '    commands=(',
+    ]
+
+    # Commands with descriptions
+    for cmd in schema.commands:
+        desc = cmd.help.replace("'", "\\'").replace('"', '\\"')
+        lines.append(f'        "{cmd.name}:{desc}"')
+        for alias in cmd.aliases:
+            lines.append(f'        "{alias}:Alias for {cmd.name}"')
+
+    lines.extend([
+        '    )',
+        '',
+        '    _arguments -C \\',
+        "        '1: :->command' \\",
+        "        '*:: :->args'",
+        '',
+        '    case $state in',
+        '        command)',
+        "            _describe -t commands 'mfc command' commands",
+        '            ;;',
+        '        args)',
+        '            case $words[1] in',
+    ])
+
+    # Generate case for each command
+    for cmd in schema.commands:
+        all_names = [cmd.name] + cmd.aliases
+        for name in all_names:
+            lines.append(f'                {name})')
+            arg_lines = _generate_zsh_command_args(cmd, schema)
+            if arg_lines:
+                lines.append('                    _arguments \\')
+                lines.append('                        ' + ' \\\n                        '.join(arg_lines))
+            lines.append('                    ;;')
+
+    lines.extend([
+        '            esac',
+        '            ;;',
+        '    esac',
+        '}',
+        '',
+        '_mfc "$@"',
+    ])
+
+    return '\n'.join(lines)
diff --git a/toolchain/mfc/cli/docs_gen.py b/toolchain/mfc/cli/docs_gen.py
new file mode 100644
index 0000000000..0c4b8b69a7
--- /dev/null
+++ b/toolchain/mfc/cli/docs_gen.py
@@ -0,0 +1,326 @@
+"""
+Generate CLI documentation from schema.
+
+This module generates markdown documentation that is always
+in sync with the CLI schema definitions.
+"""
+
+from typing import List
+from .schema import CLISchema, Command, Argument
+
+
+def _format_argument_row(arg: Argument) -> str:
+    """Format a single argument as a markdown table row."""
+    flags = []
+    if arg.short:
+        flags.append(f"`-{arg.short}`")
+    flags.append(f"`--{arg.name}`")
+    flag_str = ", ".join(flags)
+
+    # Format default value
+    default_str = _format_default(arg.default)
+
+    # Escape pipes in help text
+    help_text = arg.help.replace("|", "\\|")
+
+    return f"| {flag_str} | {help_text} | {default_str} |"
+
+
+def _format_default(default) -> str:
+    """Format a default value for display."""
+    if default is None:
+        return "-"
+    if isinstance(default, bool):
+        return "`true`" if default else "`false`"
+    if isinstance(default, list):
+        if not default:
+            return "`[]`"
+        result = ", ".join(str(d) for d in default[:3])
+        if len(default) > 3:
+            result += "..."
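+        # e.g. a default of [1, 2, 3, 4] renders as `1, 2, 3...`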
+        return f"`{result}`"
+    # Handle empty strings
+    if default == "":
+        return "-"
+    return f"`{default}`"
+
+
+def _get_command_arguments(cmd: Command, schema: CLISchema) -> List[Argument]:
+    """Get all arguments for a command including common sets."""
+    all_args = []
+
+    # Collect from common sets
+    for set_name in cmd.include_common:
+        common_set = schema.get_common_set(set_name)
+        if common_set and not common_set.mfc_config_flags:
+            all_args.extend(common_set.arguments)
+
+    # Add command-specific arguments
+    all_args.extend(cmd.arguments)
+
+    # Add mutually exclusive arguments
+    for meg in cmd.mutually_exclusive:
+        all_args.extend(meg.arguments)
+
+    return all_args
+
+
+def _generate_options_table(cmd: Command, schema: CLISchema) -> List[str]:
+    """Generate the options table for a command."""
+    lines = []
+    all_args = _get_command_arguments(cmd, schema)
+
+    if all_args or "mfc_config" in cmd.include_common:
+        lines.append("**Options:**")
+        lines.append("")
+        lines.append("| Option | Description | Default |")
+        lines.append("|--------|-------------|---------|")
+
+        for arg in all_args:
+            lines.append(_format_argument_row(arg))
+
+        # Add MFC config flags if included
+        if "mfc_config" in cmd.include_common:
+            lines.append("| `--mpi`, `--no-mpi` | Enable/disable MPI | `true` |")
+            lines.append("| `--gpu [acc/mp]`, `--no-gpu` | Enable GPU (OpenACC/OpenMP) | `no` |")
+            lines.append("| `--debug`, `--no-debug` | Enable debug mode | `false` |")
+
+        lines.append("")
+
+    return lines
+
+
+def _generate_subcommands_section(cmd: Command) -> List[str]:
+    """Generate the subcommands section for a command."""
+    lines = []
+    if not cmd.subcommands:
+        return lines
+
+    lines.append("**Subcommands:**")
+    lines.append("")
+
+    for subcmd in cmd.subcommands:
+        lines.append(f"#### {cmd.name} {subcmd.name}")
+        lines.append("")
+        lines.append(subcmd.help)
+        lines.append("")
+
+        if subcmd.positionals:
+            lines.append("Arguments:")
+            for pos in subcmd.positionals:
+                lines.append(f"- `{pos.name.upper()}` - {pos.help}")
+            lines.append("")
+
+        if subcmd.arguments:
+            lines.append("Options:")
+            lines.append("")
+            lines.append("| Option | Description | Default |")
+            lines.append("|--------|-------------|---------|")
+            for arg in subcmd.arguments:
+                lines.append(_format_argument_row(arg))
+            lines.append("")
+
+    return lines
+
+
+def _generate_command_section(cmd: Command, schema: CLISchema) -> List[str]:
+    """Generate markdown section for a single command."""
+    lines = []
+
+    # Command header (no alias in heading to keep anchor simple)
+    lines.append(f"### {cmd.name}")
+    lines.append("")
+
+    # Alias note if present
+    if cmd.aliases:
+        lines.append(f"**Alias:** `{cmd.aliases[0]}`")
+        lines.append("")
+
+    # Description
+    description = cmd.description or cmd.help
+    lines.append(description)
+    lines.append("")
+
+    # Usage
+    usage_parts = ["./mfc.sh", cmd.name]
+    for pos in cmd.positionals:
+        if pos.nargs == "?":
+            usage_parts.append(f"[{pos.name.upper()}]")
+        else:
+            usage_parts.append(pos.name.upper())
+    usage_parts.append("[OPTIONS]")
+    lines.append(f"**Usage:** `{' '.join(usage_parts)}`")
+    lines.append("")
+
+    # Positional arguments
+    if cmd.positionals:
+        lines.append("**Arguments:**")
+        lines.append("")
+        for pos in cmd.positionals:
+            lines.append(f"- `{pos.name.upper()}` - {pos.help}")
+        lines.append("")
+
+    # Options table
+    lines.extend(_generate_options_table(cmd, schema))
+
+    # Examples
+    if cmd.examples:
+        lines.append("**Examples:**")
+        lines.append("")
+        lines.append("```bash")
+        for example in cmd.examples:
+            lines.append(f"# {example.description}")
+            lines.append(example.command)
+            lines.append("")
+        lines.append("```")
+        lines.append("")
+
+    # Subcommands
+    lines.extend(_generate_subcommands_section(cmd))
+
+    lines.append("---")
+    lines.append("")
+
+    return lines
+
+
+def _generate_commands_by_category(
+    schema: CLISchema,
+    category_commands: List[str],
+    header: str
+) -> List[str]:
+    """Generate command sections for a category."""
+    lines = []
+    matching = [c for c in schema.commands if c.name in category_commands]
+    if matching:
+        lines.append(f"## {header}")
+        lines.append("")
+        for cmd in matching:
+            lines.extend(_generate_command_section(cmd, schema))
+    return lines
+
+
+def generate_cli_reference(schema: CLISchema) -> str:
+    """Generate complete CLI reference documentation in markdown."""
+    lines = [
+        "@page cli-reference Command Line Reference",
+        "",
+        "# Command Line Reference",
+        "",
+        "> **Auto-generated** from `toolchain/mfc/cli/commands.py`",
+        "> ",
+        "> Regenerate with: `./mfc.sh generate`",
+        "",
+        "## Overview",
+        "",
+        schema.description,
+        "",
+        "## Quick Reference",
+        "",
+        "| Command | Alias | Description |",
+        "|---------|-------|-------------|",
+    ]
+
+    # Quick reference table
+    for cmd in schema.commands:
+        alias = f"`{cmd.aliases[0]}`" if cmd.aliases else "-"
+        # Link each command name to its section anchor in the reference
+        lines.append(f"| [{cmd.name}](#{cmd.name}) | {alias} | {cmd.help} |")
+
+    lines.append("")
+    lines.append("## Commands")
+    lines.append("")
+
+    # Command categories
+    core_commands = ["build", "run", "test", "clean", "validate"]
+    utility_commands = ["new", "params", "packer", "completion", "generate", "help"]
+    dev_commands = ["lint", "format", "spelling", "count", "count_diff"]
+    ci_commands = ["bench", "bench_diff"]
+    other_commands = ["load", "interactive"]
+
+    # Core workflow commands first (no header, directly under Commands)
+    for cmd in schema.commands:
+        if cmd.name in core_commands:
+            lines.extend(_generate_command_section(cmd, schema))
+
+    # Category sections
+    lines.extend(_generate_commands_by_category(schema, utility_commands, "Utility Commands"))
+    lines.extend(_generate_commands_by_category(schema, dev_commands, "Development Commands"))
+    lines.extend(_generate_commands_by_category(schema, ci_commands, "CI Commands"))
+    lines.extend(_generate_commands_by_category(schema, other_commands, "Other Commands"))
+
+    # Common options section
+    lines.extend([
+        "## Common Options",
+        "",
+        "Many commands share common option sets:",
+        "",
+        "### Target Selection (`-t, --targets`)",
+        "",
+        "Available targets:",
+        "- `pre_process` - Pre-processor",
+        "- `simulation` - Main simulation",
+        "- `post_process` - Post-processor",
+        "- `syscheck` - System check utility",
+        "- `documentation` - Build documentation",
+        "",
+        "### Build Configuration Flags",
+        "",
+        "| Flag | Description |",
+        "|------|-------------|",
+        "| `--mpi` / `--no-mpi` | Enable/disable MPI support |",
+        "| `--gpu [acc/mp]` / `--no-gpu` | Enable GPU with OpenACC or OpenMP |",
+        "| `--debug` / `--no-debug` | Enable debug build |",
+        "| `--gcov` / `--no-gcov` | Enable code coverage |",
+        "| `--single` / `--no-single` | Single precision |",
+        "| `--mixed` / `--no-mixed` | Mixed precision |",
+        "",
+        "### Verbosity (`-v, --verbose`)",
+        "",
+        "- `-v` - Basic verbose output",
+        "- `-vv` - Show build commands",
+        "- `-vvv` - Full debug output including CMake debug",
+        "",
+    ])
+
+    return "\n".join(lines)
+
+
+def generate_command_summary(schema: CLISchema) -> str:
+    """Generate a concise command summary for quick reference."""
+    lines = [
+        "# MFC Quick Reference",
+        "",
+        "## Commands",
+        "",
+    ]
+
+    for cmd in schema.commands:
+        alias_str = f" ({cmd.aliases[0]})" if cmd.aliases else ""
+        lines.append(f"- **{cmd.name}**{alias_str}: {cmd.help}")
+
+    lines.extend([
+        "",
+        "## Common Patterns",
+        "",
+        "```bash",
+        "# Build MFC",
+        "./mfc.sh build",
+        "./mfc.sh build --gpu    # With GPU support",
+        "./mfc.sh build -j 8     # Parallel build",
+        "",
+        "# Run a case",
+        "./mfc.sh run case.py",
+        "./mfc.sh run case.py -n 4    # 4 MPI ranks",
+        "",
+        "# Run tests",
+        "./mfc.sh test",
+        "./mfc.sh test -j 4    # Parallel tests",
+        "",
+        "# Validate a case",
+        "./mfc.sh validate case.py",
+        "```",
+        "",
+    ])
+
+    return "\n".join(lines)
diff --git a/toolchain/mfc/cli/schema.py b/toolchain/mfc/cli/schema.py
new file mode 100644
index 0000000000..d895f76e7b
--- /dev/null
+++ b/toolchain/mfc/cli/schema.py
@@ -0,0 +1,204 @@
+"""
+CLI Schema Dataclass Definitions.
+
+This module defines the dataclasses used to describe CLI commands, arguments,
+and their properties. These definitions serve as the single source of truth
+for generating argparse parsers, shell completions, and documentation.
+"""
+
+from __future__ import annotations
+from dataclasses import dataclass, field
+from enum import Enum, auto
+from typing import List, Optional, Any, Union
+
+
+class ArgAction(Enum):
+    """Supported argparse actions."""
+    STORE = "store"
+    STORE_TRUE = "store_true"
+    STORE_FALSE = "store_false"
+    STORE_CONST = "store_const"
+    COUNT = "count"
+    APPEND = "append"
+
+
+class CompletionType(Enum):
+    """Types of shell completion behavior."""
+    NONE = auto()         # No completion
+    FILES = auto()        # All file completion
+    FILES_PY = auto()     # Python files only (*.py)
+    FILES_PACK = auto()   # Pack files only (*.pack)
+    FILES_YAML = auto()   # YAML files only (*.yaml, *.yml)
+    DIRECTORIES = auto()  # Directory completion
+    CHOICES = auto()      # Static choices from choices list
+
+
+@dataclass
+class Completion:
+    """Completion configuration for an argument."""
+    type: CompletionType = CompletionType.NONE
+    choices: Optional[List[str]] = None
+
+
+@dataclass
+class Argument:  # pylint: disable=too-many-instance-attributes
+    """
+    Definition of a single CLI option argument (--flag).
+
+    This represents one add_argument() call for a flag-style argument.
+    """
+    # Identity
+    name: str                    # Long form without dashes (e.g., "targets")
+    short: Optional[str] = None  # Short form without dash (e.g., "t")
+
+    # Argparse configuration
+    help: str = ""
+    action: ArgAction = ArgAction.STORE
+    type: Optional[type] = None  # str, int, float, etc.
+    default: Any = None
+    choices: Optional[List[Any]] = None
+    nargs: Optional[Union[str, int]] = None  # "+", "*", "?", int, or "..." for REMAINDER
for REMAINDER + metavar: Optional[str] = None + required: bool = False + dest: Optional[str] = None # Override destination name + const: Any = None # For store_const action + + # Completion + completion: Completion = field(default_factory=Completion) + + def get_flags(self) -> List[str]: + """Return the flag strings for argparse.""" + flags = [] + if self.short: + flags.append(f"-{self.short}") + flags.append(f"--{self.name}") + return flags + + def get_dest(self) -> str: + """Return the destination name for argparse.""" + if self.dest: + return self.dest + return self.name.replace("-", "_") + + +@dataclass +class Positional: + """Definition of a positional argument.""" + name: str # Metavar and destination + help: str = "" + type: type = str + nargs: Optional[Union[str, int]] = None + default: Any = None + choices: Optional[List[str]] = None + + # Completion + completion: Completion = field(default_factory=Completion) + + +@dataclass +class Example: + """A usage example for documentation.""" + command: str + description: str + + +@dataclass +class MutuallyExclusiveGroup: + """A group where only one argument can be specified.""" + arguments: List[Argument] = field(default_factory=list) + required: bool = False + + +@dataclass +class Command: # pylint: disable=too-many-instance-attributes + """ + Definition of a CLI command/subcommand. + + This is the main building block for the CLI structure. + """ + # Identity + name: str + help: str + aliases: List[str] = field(default_factory=list) + + # Arguments + positionals: List[Positional] = field(default_factory=list) + arguments: List[Argument] = field(default_factory=list) + mutually_exclusive: List[MutuallyExclusiveGroup] = field(default_factory=list) + + # Inherit common argument sets + include_common: List[str] = field(default_factory=list) # e.g., ["targets", "jobs"] + + # Subcommands (for nested commands like "packer pack") + subcommands: List[Command] = field(default_factory=list) + + # Documentation + description: Optional[str] = None # Long description for docs + examples: List[Example] = field(default_factory=list) + key_options: List[tuple] = field(default_factory=list) # (option, description) pairs + + # Handler module path (for dispatch) + handler: Optional[str] = None # Module.function path + + +@dataclass +class CommonArgumentSet: + """ + A reusable set of arguments that can be included in multiple commands. + + Replaces the add_common_arguments() function pattern. + """ + name: str # Identifier for include_common + arguments: List[Argument] = field(default_factory=list) + # For MFCConfig flags that need --X and --no-X pairs + mfc_config_flags: bool = False + + +@dataclass +class CLISchema: + """ + The complete CLI schema - single source of truth. 
+ + This dataclass contains all information needed to generate: + - argparse parsers + - Bash completions + - Zsh completions + - User guide help content + - CLI reference documentation + """ + prog: str = "./mfc.sh" + description: str = "" + + # Top-level arguments (like --help) + arguments: List[Argument] = field(default_factory=list) + + # Commands + commands: List[Command] = field(default_factory=list) + + # Reusable argument sets + common_sets: List[CommonArgumentSet] = field(default_factory=list) + + # Help topics (separate from commands) + help_topics: dict = field(default_factory=dict) + + def get_command(self, name: str) -> Optional[Command]: + """Get a command by name or alias.""" + for cmd in self.commands: + if cmd.name == name or name in cmd.aliases: + return cmd + return None + + def get_all_command_names(self) -> List[str]: + """Get all command names and aliases.""" + names = [] + for cmd in self.commands: + names.append(cmd.name) + names.extend(cmd.aliases) + return names + + def get_common_set(self, name: str) -> Optional[CommonArgumentSet]: + """Get a common argument set by name.""" + for cs in self.common_sets: + if cs.name == name: + return cs + return None diff --git a/toolchain/mfc/cli/test_cli.py b/toolchain/mfc/cli/test_cli.py new file mode 100644 index 0000000000..6eaee9963c --- /dev/null +++ b/toolchain/mfc/cli/test_cli.py @@ -0,0 +1,138 @@ +""" +Smoke tests for cli/ modules. + +Verifies that modules can be imported and basic functionality works. +""" +# pylint: disable=import-outside-toplevel + +import unittest + + +class TestCliImports(unittest.TestCase): + """Test that all CLI modules can be imported.""" + + def test_schema_import(self): + """Schema module should import and export expected classes.""" + from . import schema + self.assertTrue(hasattr(schema, 'Command')) + self.assertTrue(hasattr(schema, 'Argument')) + self.assertTrue(hasattr(schema, 'Positional')) + self.assertTrue(hasattr(schema, 'CLISchema')) + + def test_commands_import(self): + """Commands module should import and have MFC_CLI_SCHEMA.""" + from . import commands + self.assertTrue(hasattr(commands, 'MFC_CLI_SCHEMA')) + self.assertIsNotNone(commands.MFC_CLI_SCHEMA) + + def test_argparse_gen_import(self): + """Argparse generator should import.""" + from . import argparse_gen + self.assertTrue(hasattr(argparse_gen, 'generate_parser')) + + def test_completion_gen_import(self): + """Completion generator should import.""" + from . import completion_gen + self.assertTrue(hasattr(completion_gen, 'generate_bash_completion')) + self.assertTrue(hasattr(completion_gen, 'generate_zsh_completion')) + + def test_docs_gen_import(self): + """Docs generator should import.""" + from . 
import docs_gen + self.assertTrue(hasattr(docs_gen, 'generate_cli_reference')) + + +class TestCliSchema(unittest.TestCase): + """Test CLI schema structure.""" + + def test_cli_schema_has_commands(self): + """MFC_CLI_SCHEMA should have commands defined.""" + from .commands import MFC_CLI_SCHEMA + self.assertTrue(len(MFC_CLI_SCHEMA.commands) > 0) + + def test_cli_schema_has_description(self): + """MFC_CLI_SCHEMA should have a description.""" + from .commands import MFC_CLI_SCHEMA + self.assertIsNotNone(MFC_CLI_SCHEMA.description) + self.assertIsInstance(MFC_CLI_SCHEMA.description, str) + + def test_commands_have_names(self): + """Each command should have a name.""" + from .commands import MFC_CLI_SCHEMA + for cmd in MFC_CLI_SCHEMA.commands: + self.assertIsNotNone(cmd.name, f"Command missing name") + self.assertTrue(len(cmd.name) > 0, f"Command has empty name") + + +class TestArgparseGenerator(unittest.TestCase): + """Test argparse generator.""" + + def test_generate_parser_returns_parser(self): + """generate_parser should return a tuple with ArgumentParser.""" + import argparse + from .argparse_gen import generate_parser + from .commands import MFC_CLI_SCHEMA + + result = generate_parser(MFC_CLI_SCHEMA) + # Returns (parser, subparsers_dict) + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + parser, subparsers = result + self.assertIsInstance(parser, argparse.ArgumentParser) + self.assertIsInstance(subparsers, dict) + + def test_parser_has_subparsers(self): + """Parser should have subparsers for each command.""" + from .argparse_gen import generate_parser + from .commands import MFC_CLI_SCHEMA + + parser, subparsers = generate_parser(MFC_CLI_SCHEMA) + # Should have subparsers for all commands + self.assertTrue(len(subparsers) > 0) + # Parser should not raise error when printing help + try: + parser.format_help() + except Exception as e: + self.fail(f"Parser help failed: {e}") + + +class TestCompletionGenerator(unittest.TestCase): + """Test completion script generators.""" + + def test_bash_completion_generates_output(self): + """Bash completion should generate non-empty output.""" + from .completion_gen import generate_bash_completion + from .commands import MFC_CLI_SCHEMA + + output = generate_bash_completion(MFC_CLI_SCHEMA) + self.assertIsInstance(output, str) + self.assertTrue(len(output) > 100) # Should be substantial + self.assertIn("complete", output.lower()) # Should contain bash complete + + def test_zsh_completion_generates_output(self): + """Zsh completion should generate non-empty output.""" + from .completion_gen import generate_zsh_completion + from .commands import MFC_CLI_SCHEMA + + output = generate_zsh_completion(MFC_CLI_SCHEMA) + self.assertIsInstance(output, str) + self.assertTrue(len(output) > 100) + self.assertIn("compdef", output.lower()) # Should contain zsh compdef + + +class TestDocsGenerator(unittest.TestCase): + """Test documentation generator.""" + + def test_docs_generates_markdown(self): + """Docs generator should produce markdown output.""" + from .docs_gen import generate_cli_reference + from .commands import MFC_CLI_SCHEMA + + output = generate_cli_reference(MFC_CLI_SCHEMA) + self.assertIsInstance(output, str) + self.assertTrue(len(output) > 100) + self.assertIn("#", output) # Should contain markdown headers + + +if __name__ == "__main__": + unittest.main() diff --git a/toolchain/mfc/common.py b/toolchain/mfc/common.py index 6061f14b5b..ce02e8251c 100644 --- a/toolchain/mfc/common.py +++ b/toolchain/mfc/common.py @@ -1,10 +1,35 @@ 
-import os, yaml, typing, shutil, subprocess +import os, yaml, typing, shutil, subprocess, logging from os.path import join, abspath, normpath, dirname, realpath from .printer import cons +# Debug logging infrastructure +_debug_logger = None + +def setup_debug_logging(enabled: bool = False): + """Setup debug logging for troubleshooting.""" + global _debug_logger # pylint: disable=global-statement + if enabled: + logging.basicConfig( + level=logging.DEBUG, + format='[DEBUG %(asctime)s] %(message)s', + datefmt='%H:%M:%S' + ) + _debug_logger = logging.getLogger('mfc') + _debug_logger.setLevel(logging.DEBUG) + cons.print("[dim]Debug logging enabled[/dim]") + else: + _debug_logger = None + +def debug(msg: str): + """Log a debug message if debug logging is enabled.""" + if _debug_logger: + _debug_logger.debug(msg) + cons.print(f"[dim][DEBUG][/dim] {msg}") + + MFC_ROOT_DIR = abspath(normpath(f"{dirname(realpath(__file__))}/../..")) MFC_TEST_DIR = abspath(join(MFC_ROOT_DIR, "tests")) MFC_BUILD_DIR = abspath(join(MFC_ROOT_DIR, "build")) diff --git a/toolchain/mfc/completion.py b/toolchain/mfc/completion.py new file mode 100644 index 0000000000..908abbde14 --- /dev/null +++ b/toolchain/mfc/completion.py @@ -0,0 +1,220 @@ +""" +Shell completion installation for MFC. + +Installs completion scripts to ~/.local/share/mfc/completions/ and +configures the user's shell to source them automatically. +""" + +import os +import shutil +from pathlib import Path + +from .printer import cons +from .common import MFC_ROOT_DIR + + +# Installation directory (user-local, independent of MFC clone location) +COMPLETION_INSTALL_DIR = Path.home() / ".local" / "share" / "mfc" / "completions" + +# Source files +BASH_COMPLETION_SRC = Path(MFC_ROOT_DIR) / "toolchain" / "completions" / "mfc.bash" +ZSH_COMPLETION_SRC = Path(MFC_ROOT_DIR) / "toolchain" / "completions" / "_mfc" + +# Shell RC files +BASHRC = Path.home() / ".bashrc" +ZSHRC = Path.home() / ".zshrc" + +# Lines to add to RC files +BASH_SOURCE_LINE = f'[ -f "{COMPLETION_INSTALL_DIR}/mfc.bash" ] && source "{COMPLETION_INSTALL_DIR}/mfc.bash"' +ZSH_FPATH_LINE = f'fpath=("{COMPLETION_INSTALL_DIR}" $fpath)' + + +def _line_in_file(filepath: Path, line: str) -> bool: + """Check if a line (or similar) already exists in a file.""" + if not filepath.exists(): + return False + + content = filepath.read_text() + # Check for the exact line or the key part of it + return line in content or str(COMPLETION_INSTALL_DIR) in content + + +def _append_to_file(filepath: Path, line: str, comment: str = None): + """Append a line to a file if it doesn't already exist.""" + if _line_in_file(filepath, line): + cons.print(f" [dim]Already configured in {filepath}[/dim]") + return False + + with open(filepath, "a") as f: + f.write(f"\n# {comment}\n" if comment else "\n") + f.write(f"{line}\n") + + return True + + +def install_bash(): + """Install bash completion.""" + cons.print("[bold]Installing Bash completion...[/bold]") + + # Create installation directory + COMPLETION_INSTALL_DIR.mkdir(parents=True, exist_ok=True) + + # Copy completion script + dest = COMPLETION_INSTALL_DIR / "mfc.bash" + if BASH_COMPLETION_SRC.exists(): + shutil.copy2(BASH_COMPLETION_SRC, dest) + cons.print(f" [green]✓[/green] Copied completion script to [cyan]{dest}[/cyan]") + else: + cons.print(f" [red]✗[/red] Source file not found: {BASH_COMPLETION_SRC}") + return False + + # Add source line to .bashrc (creates if needed) + if _append_to_file(BASHRC, BASH_SOURCE_LINE, "MFC shell completion"): + cons.print(f" 
[green]✓[/green] Added source line to [cyan]{BASHRC}[/cyan]") + + cons.print() + cons.print("[green]Bash completion installed![/green]") + cons.print() + cons.print("To activate now (without restarting your shell):") + cons.print(f" [cyan]source {dest}[/cyan]") + cons.print() + cons.print("Or start a new terminal session.") + + return True + + +def install_zsh(): + """Install zsh completion.""" + cons.print("[bold]Installing Zsh completion...[/bold]") + + # Create installation directory + COMPLETION_INSTALL_DIR.mkdir(parents=True, exist_ok=True) + + # Copy completion script + dest = COMPLETION_INSTALL_DIR / "_mfc" + if ZSH_COMPLETION_SRC.exists(): + shutil.copy2(ZSH_COMPLETION_SRC, dest) + cons.print(f" [green]✓[/green] Copied completion script to [cyan]{dest}[/cyan]") + else: + cons.print(f" [red]✗[/red] Source file not found: {ZSH_COMPLETION_SRC}") + return False + + # Add fpath line to .zshrc + if ZSHRC.exists(): + if _append_to_file(ZSHRC, ZSH_FPATH_LINE, "MFC shell completion"): + cons.print(f" [green]✓[/green] Added fpath to [cyan]{ZSHRC}[/cyan]") + + # Also need to ensure compinit is called + compinit_line = "autoload -Uz compinit && compinit" + if not _line_in_file(ZSHRC, "compinit"): + _append_to_file(ZSHRC, compinit_line) + cons.print(f" [green]✓[/green] Added compinit to [cyan]{ZSHRC}[/cyan]") + else: + cons.print(f" [yellow]![/yellow] {ZSHRC} not found - you may need to configure manually") + cons.print(f" Add to your shell config: {ZSH_FPATH_LINE}") + + cons.print() + cons.print("[green]Zsh completion installed![/green]") + cons.print() + cons.print("Start a new terminal session to activate.") + + return True + + +def install_auto(): + """Auto-detect shell and install appropriate completion.""" + shell = os.environ.get("SHELL", "") + + if "zsh" in shell: + return install_zsh() + + # Default to bash + return install_bash() + + +def uninstall(): + """Remove installed completion files and RC modifications.""" + cons.print("[bold]Uninstalling MFC shell completion...[/bold]") + + # Remove completion directory + if COMPLETION_INSTALL_DIR.exists(): + shutil.rmtree(COMPLETION_INSTALL_DIR) + cons.print(f" [green]✓[/green] Removed [cyan]{COMPLETION_INSTALL_DIR}[/cyan]") + + # Note: We don't automatically remove lines from .bashrc/.zshrc + # as that's more risky. Just inform the user. 
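+    # (For reference, the install step appended a "# MFC shell completion"
+    # comment followed by a source/fpath line that points into
+    # COMPLETION_INSTALL_DIR, so the leftover lines are easy to find by hand.)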
+ cons.print() + cons.print("[yellow]Note:[/yellow] You may want to manually remove the MFC completion lines from:") + cons.print(f" [cyan]{BASHRC}[/cyan]") + cons.print(f" [cyan]{ZSHRC}[/cyan]") + + return True + + +def show_status(): + """Show current completion installation status.""" + cons.print("[bold]MFC Shell Completion Status[/bold]") + cons.print() + + # Check installation directory + if COMPLETION_INSTALL_DIR.exists(): + cons.print(f" [green]✓[/green] Install directory: [cyan]{COMPLETION_INSTALL_DIR}[/cyan]") + + bash_installed = (COMPLETION_INSTALL_DIR / "mfc.bash").exists() + zsh_installed = (COMPLETION_INSTALL_DIR / "_mfc").exists() + + if bash_installed: + cons.print(" [green]✓[/green] Bash completion installed") + else: + cons.print(" [dim]✗ Bash completion not installed[/dim]") + + if zsh_installed: + cons.print(" [green]✓[/green] Zsh completion installed") + else: + cons.print(" [dim]✗ Zsh completion not installed[/dim]") + else: + cons.print(f" [dim]✗ Not installed[/dim]") + + cons.print() + + # Check RC files + if BASHRC.exists() and _line_in_file(BASHRC, str(COMPLETION_INSTALL_DIR)): + cons.print(f" [green]✓[/green] Configured in [cyan]{BASHRC}[/cyan]") + else: + cons.print(f" [dim]✗ Not configured in {BASHRC}[/dim]") + + if ZSHRC.exists() and _line_in_file(ZSHRC, str(COMPLETION_INSTALL_DIR)): + cons.print(f" [green]✓[/green] Configured in [cyan]{ZSHRC}[/cyan]") + else: + cons.print(f" [dim]✗ Not configured in {ZSHRC}[/dim]") + + +def completion(): + """Main entry point for completion command.""" + # pylint: disable=import-outside-toplevel + from .state import ARG + + action = ARG("completion_action") + + if action == "install": + shell = ARG("completion_shell") + if shell == "bash": + install_bash() + elif shell == "zsh": + install_zsh() + else: + install_auto() + elif action == "uninstall": + uninstall() + elif action == "status": + show_status() + else: + # Default: show status and usage + show_status() + cons.print() + cons.print("[bold]Usage:[/bold]") + cons.print(" [cyan]./mfc.sh completion install[/cyan] Auto-detect shell and install") + cons.print(" [cyan]./mfc.sh completion install bash[/cyan] Install bash completion") + cons.print(" [cyan]./mfc.sh completion install zsh[/cyan] Install zsh completion") + cons.print(" [cyan]./mfc.sh completion uninstall[/cyan] Remove completion files") + cons.print(" [cyan]./mfc.sh completion status[/cyan] Show installation status") diff --git a/toolchain/mfc/gen_case_constraints_docs.py b/toolchain/mfc/gen_case_constraints_docs.py index 8f2b75ce2a..b485f344c1 100644 --- a/toolchain/mfc/gen_case_constraints_docs.py +++ b/toolchain/mfc/gen_case_constraints_docs.py @@ -646,6 +646,7 @@ def render_markdown(rules: Iterable[Rule]) -> str: # pylint: disable=too-many-l lines: List[str] = [] + lines.append("@page case_constraints Case Creator Guide\n") lines.append("# Case Creator Guide\n") lines.append( "> **Quick reference** for building MFC cases: working examples, compatibility rules, " diff --git a/toolchain/mfc/generate.py b/toolchain/mfc/generate.py new file mode 100644 index 0000000000..2c55794602 --- /dev/null +++ b/toolchain/mfc/generate.py @@ -0,0 +1,116 @@ +""" +Generate completion scripts and documentation from CLI schema. + +This module regenerates all derived files from the single source of truth +in cli/commands.py. Run `./mfc.sh generate` after modifying commands. 
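+
+Typical invocations (a sketch; the exact flag spellings are assumed to mirror
+the ARG names read below, i.e. "check" and "json_schema"):
+
+    ./mfc.sh generate                 # rewrite all generated files in place
+    ./mfc.sh generate --check         # CI mode: fail if any file is stale
+    ./mfc.sh generate --json-schema   # only the JSON schema + parameter docs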
+""" +# pylint: disable=import-outside-toplevel + +import json +from pathlib import Path + +from .printer import cons +from .common import MFC_ROOT_DIR +from .state import ARG +from .cli.commands import MFC_CLI_SCHEMA +from .cli.completion_gen import generate_bash_completion, generate_zsh_completion +from .cli.docs_gen import generate_cli_reference + + +def _check_or_write(path: Path, content: str, check_mode: bool) -> bool: + """Check if file is up to date or write new content. Returns True on success.""" + if check_mode: + if not path.exists(): + cons.print(f"[red]ERROR:[/red] {path} does not exist") + return False + if path.read_text() != content: + cons.print(f"[red]ERROR:[/red] {path} is out of date") + cons.print("[yellow]Run ./mfc.sh generate to update[/yellow]") + return False + cons.print(f"[green]OK[/green] {path.name} is up to date") + else: + path.write_text(content) + cons.print(f"[green]Generated[/green] {path}") + return True + + +def generate(): + """Regenerate completion scripts and optionally JSON schema.""" + from .params.generators.json_schema_gen import generate_json_schema + from .params.generators.docs_gen import generate_parameter_docs + + check_mode = ARG("check") + json_schema_mode = ARG("json_schema") + + # If only generating JSON schema, do that and return + if json_schema_mode: + _generate_json_schema() + return + + completions_dir = Path(MFC_ROOT_DIR) / "toolchain" / "completions" + docs_dir = Path(MFC_ROOT_DIR) / "docs" / "documentation" + completions_dir.mkdir(exist_ok=True) + docs_dir.mkdir(exist_ok=True) + + # Generate CLI files + cli_files = [ + (completions_dir / "mfc.bash", generate_bash_completion(MFC_CLI_SCHEMA)), + (completions_dir / "_mfc", generate_zsh_completion(MFC_CLI_SCHEMA)), + (docs_dir / "cli-reference.md", generate_cli_reference(MFC_CLI_SCHEMA)), + ] + + # Generate parameter files + schema = generate_json_schema(include_descriptions=True) + schema_content = json.dumps(schema, indent=2) + params_content = generate_parameter_docs() + + param_files = [ + (Path(MFC_ROOT_DIR) / "toolchain" / "mfc-case-schema.json", schema_content), + (docs_dir / "parameters.md", params_content), + ] + + all_ok = True + for path, content in cli_files + param_files: + if not _check_or_write(path, content, check_mode): + all_ok = False + + if not all_ok: + exit(1) + + if not check_mode: + cons.print() + cons.print("[bold]Files regenerated from cli/commands.py and params/definitions.py[/bold]") + + +def _generate_json_schema(): + """Generate JSON Schema and parameter documentation (standalone mode).""" + from .params.generators.json_schema_gen import generate_json_schema, get_schema_stats + from .params.generators.docs_gen import generate_parameter_docs + from .ide import update_vscode_settings + + # Generate JSON Schema + schema = generate_json_schema(include_descriptions=True) + schema_path = Path(MFC_ROOT_DIR) / "toolchain" / "mfc-case-schema.json" + with open(schema_path, 'w') as f: + json.dump(schema, f, indent=2) + + # Generate parameter documentation + docs_path = Path(MFC_ROOT_DIR) / "docs" / "documentation" / "parameters.md" + docs_path.write_text(generate_parameter_docs()) + + # Update VS Code settings + update_vscode_settings() + + stats = get_schema_stats() + + cons.print(f"[green]Generated[/green] {schema_path}") + cons.print(f"[green]Generated[/green] {docs_path}") + cons.print() + cons.print(f"[bold]Parameter Statistics:[/bold]") + cons.print(f" Total parameters: {stats['total_params']}") + cons.print(f" With constraints: {stats['with_constraints']}") 
+ cons.print(f" With descriptions: {stats['with_descriptions']}") + cons.print() + cons.print("[bold]Parameter Lookup:[/bold]") + cons.print(" CLI: [cyan]./mfc.sh params [/cyan]") + cons.print(" Docs: [cyan]docs/documentation/parameters.md[/cyan]") diff --git a/toolchain/mfc/ide.py b/toolchain/mfc/ide.py new file mode 100644 index 0000000000..767b67e8ae --- /dev/null +++ b/toolchain/mfc/ide.py @@ -0,0 +1,136 @@ +""" +IDE Configuration Module. + +Automatically configures IDE settings (VS Code, etc.) for MFC development. +""" +# pylint: disable=import-outside-toplevel + +import re +from pathlib import Path + +from .common import MFC_ROOT_DIR + +# Marker comments for the auto-generated section +_VSCODE_MARKER_BEGIN = "// MFC-SCHEMA-CONFIG-BEGIN (auto-generated, do not edit)" +_VSCODE_MARKER_END = "// MFC-SCHEMA-CONFIG-END" + +# The MFC schema configuration to insert +# Matches common case file names - users get auto-completion for JSON/YAML case files +_VSCODE_MFC_CONFIG = '''\ + "json.schemas": [ + { + "fileMatch": ["**/case.json", "**/input.json", "**/mfc-case.json", "**/mfc.json"], + "url": "./toolchain/mfc-case-schema.json" + } + ], + "yaml.schemas": { + "./toolchain/mfc-case-schema.json": ["**/case.yaml", "**/case.yml", "**/input.yaml", "**/input.yml", "**/mfc-case.yaml", "**/mfc.yaml"] + }''' + + +def ensure_vscode_settings() -> bool: + """ + Ensure VS Code settings include MFC schema configuration. + + This is called on every mfc.sh invocation but is very lightweight: + - Only reads/writes if the marker section is missing + - Does not regenerate the schema (that's done via generate --json-schema) + + Returns: + True if settings were updated, False if already configured + """ + vscode_dir = Path(MFC_ROOT_DIR) / ".vscode" + settings_path = vscode_dir / "settings.json" + + # Check if schema file exists (it should be committed to repo) + schema_path = Path(MFC_ROOT_DIR) / "toolchain" / "mfc-case-schema.json" + if not schema_path.exists(): + # Schema not generated yet - skip configuration + return False + + # Build the marked config block + marked_config = f"{_VSCODE_MARKER_BEGIN}\n{_VSCODE_MFC_CONFIG}\n {_VSCODE_MARKER_END}" + + if settings_path.exists(): + content = settings_path.read_text() + + # Check if our markers already exist - if so, nothing to do + if _VSCODE_MARKER_BEGIN in content: + return False + + # Insert before the final closing brace + last_brace = content.rfind('}') + if last_brace != -1: + # Check if we need a comma + before_brace = content[:last_brace].rstrip() + needs_comma = before_brace and not before_brace.endswith('{') and not before_brace.endswith(',') + comma = ',' if needs_comma else '' + new_content = ( + content[:last_brace].rstrip() + + comma + '\n\n ' + + marked_config + '\n' + + content[last_brace:] + ) + else: + # Malformed JSON, just append + new_content = content + '\n' + marked_config + else: + # Ensure .vscode directory exists + vscode_dir.mkdir(exist_ok=True) + # Create new settings file with just our config + new_content = f'{{\n {marked_config}\n}}\n' + + settings_path.write_text(new_content) + return True + + +def update_vscode_settings() -> None: + """ + Force update VS Code settings with MFC schema configuration. + + Unlike ensure_vscode_settings(), this always updates the marked section, + even if it already exists. Used by `generate --json-schema`. 
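+
+    The marked region maintained in .vscode/settings.json looks roughly like:
+
+        // MFC-SCHEMA-CONFIG-BEGIN (auto-generated, do not edit)
+        "json.schemas": [ ... ],
+        "yaml.schemas": { ... }
+        // MFC-SCHEMA-CONFIG-END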
+ """ + from .printer import cons + + vscode_dir = Path(MFC_ROOT_DIR) / ".vscode" + settings_path = vscode_dir / "settings.json" + + # Ensure .vscode directory exists + vscode_dir.mkdir(exist_ok=True) + + # Build the marked config block + marked_config = f"{_VSCODE_MARKER_BEGIN}\n{_VSCODE_MFC_CONFIG}\n {_VSCODE_MARKER_END}" + + if settings_path.exists(): + content = settings_path.read_text() + + # Check if our markers already exist + marker_pattern = re.compile( + rf'{re.escape(_VSCODE_MARKER_BEGIN)}.*?{re.escape(_VSCODE_MARKER_END)}', + re.DOTALL + ) + + if marker_pattern.search(content): + # Replace existing marked section + new_content = marker_pattern.sub(marked_config, content) + else: + # Insert before the final closing brace + last_brace = content.rfind('}') + if last_brace != -1: + before_brace = content[:last_brace].rstrip() + needs_comma = before_brace and not before_brace.endswith('{') and not before_brace.endswith(',') + comma = ',' if needs_comma else '' + new_content = ( + content[:last_brace].rstrip() + + comma + '\n\n ' + + marked_config + '\n' + + content[last_brace:] + ) + else: + new_content = content + '\n' + marked_config + else: + new_content = f'{{\n {marked_config}\n}}\n' + + settings_path.write_text(new_content) + cons.print(f"[green]Updated[/green] {settings_path}") diff --git a/toolchain/mfc/init.py b/toolchain/mfc/init.py new file mode 100644 index 0000000000..d339d7c9df --- /dev/null +++ b/toolchain/mfc/init.py @@ -0,0 +1,509 @@ +"""MFC Case Template Generator - Create new case files from templates.""" + +import os +import shutil + +from .printer import cons +from .common import MFC_EXAMPLE_DIRPATH, MFCException +from .state import ARG + + +# Built-in minimal templates +BUILTIN_TEMPLATES = { + '1D_minimal': '''\ +#!/usr/bin/env python3 +""" +1D Minimal Case Template +------------------------ +A minimal 1D shock tube case to get started with MFC. 
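+
+The default left/right states (rho = 1.0 / 0.125, p = 1.0 / 0.1, gamma = 1.4)
+are the classic Sod (1978) shock tube problem, so results can be checked
+against its well-known exact solution.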
+ +Usage: + ./mfc.sh run case.py +""" +import math +import json + +# ============================================================================= +# SIMULATION PARAMETERS - Modify these for your case +# ============================================================================= + +# Grid resolution +Nx = 399 # Number of cells in x-direction + +# Domain size +x_start = 0.0 # Domain start +x_end = 1.0 # Domain end + +# Time stepping +t_end = 0.1 # End time +Nt = 1000 # Number of time steps + +# Initial conditions for left state (patch 1) +rho_L = 1.0 # Density +vel_L = 0.0 # Velocity +pres_L = 1.0 # Pressure + +# Initial conditions for right state (patch 2) +rho_R = 0.125 # Density +vel_R = 0.0 # Velocity +pres_R = 0.1 # Pressure + +# Fluid properties +gamma = 1.4 # Ratio of specific heats + +# ============================================================================= +# DERIVED QUANTITIES - Usually don't need to modify +# ============================================================================= +dx = (x_end - x_start) / (Nx + 1) +dt = t_end / Nt + +# ============================================================================= +# CASE DICTIONARY - MFC configuration +# ============================================================================= +print(json.dumps({ + # Logistics + "run_time_info": "T", + + # Computational Domain + "x_domain%beg": x_start, + "x_domain%end": x_end, + "m": Nx, + "n": 0, + "p": 0, + "dt": dt, + "t_step_start": 0, + "t_step_stop": Nt, + "t_step_save": max(1, Nt // 10), + + # Simulation Algorithm + "num_patches": 2, + "model_eqns": 2, # 5-equation model + "num_fluids": 1, + "time_stepper": 3, # TVD RK3 + "weno_order": 5, # WENO5 + "weno_eps": 1.0e-16, + "mapped_weno": "T", + "riemann_solver": 2, # HLLC + "wave_speeds": 1, + "avg_state": 2, + + # Boundary Conditions (-3 = extrapolation) + "bc_x%beg": -3, + "bc_x%end": -3, + + # Output + "format": 1, + "precision": 2, + "prim_vars_wrt": "T", + "parallel_io": "T", + + # Patch 1: Left state + "patch_icpp(1)%geometry": 1, + "patch_icpp(1)%x_centroid": (x_start + x_end) / 4, + "patch_icpp(1)%length_x": (x_end - x_start) / 2, + "patch_icpp(1)%vel(1)": vel_L, + "patch_icpp(1)%pres": pres_L, + "patch_icpp(1)%alpha_rho(1)": rho_L, + "patch_icpp(1)%alpha(1)": 1.0, + + # Patch 2: Right state + "patch_icpp(2)%geometry": 1, + "patch_icpp(2)%x_centroid": 3 * (x_start + x_end) / 4, + "patch_icpp(2)%length_x": (x_end - x_start) / 2, + "patch_icpp(2)%vel(1)": vel_R, + "patch_icpp(2)%pres": pres_R, + "patch_icpp(2)%alpha_rho(1)": rho_R, + "patch_icpp(2)%alpha(1)": 1.0, + + # Fluid Properties + "fluid_pp(1)%gamma": 1.0 / (gamma - 1.0), + "fluid_pp(1)%pi_inf": 0.0, +})) +''', + + '2D_minimal': '''\ +#!/usr/bin/env python3 +""" +2D Minimal Case Template +------------------------ +A minimal 2D case with a circular perturbation. 
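+
+The default setup places a circular region with twice the background density
+and pressure at the domain center; it expands outward as a cylindrical blast
+wave.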
+ +Usage: + ./mfc.sh run case.py +""" +import math +import json + +# ============================================================================= +# SIMULATION PARAMETERS - Modify these for your case +# ============================================================================= + +# Grid resolution +Nx = 99 # Cells in x-direction +Ny = 99 # Cells in y-direction + +# Domain size +x_start, x_end = 0.0, 1.0 +y_start, y_end = 0.0, 1.0 + +# Time stepping +dt = 1.0e-6 +Nt = 1000 + +# Background state +rho_bg = 1.0 +vel_x_bg = 0.0 +vel_y_bg = 0.0 +pres_bg = 1.0e5 + +# Perturbation (circular region) +x_center = 0.5 +y_center = 0.5 +radius = 0.1 +rho_pert = 2.0 +pres_pert = 2.0e5 + +# Fluid properties +gamma = 1.4 + +# ============================================================================= +# CASE DICTIONARY - MFC configuration +# ============================================================================= +print(json.dumps({ + # Logistics + "run_time_info": "T", + + # Computational Domain + "x_domain%beg": x_start, + "x_domain%end": x_end, + "y_domain%beg": y_start, + "y_domain%end": y_end, + "m": Nx, + "n": Ny, + "p": 0, + "dt": dt, + "t_step_start": 0, + "t_step_stop": Nt, + "t_step_save": max(1, Nt // 10), + + # Simulation Algorithm + "num_patches": 2, + "model_eqns": 2, + "num_fluids": 1, + "time_stepper": 3, + "weno_order": 5, + "weno_eps": 1.0e-16, + "mapped_weno": "T", + "riemann_solver": 2, + "wave_speeds": 1, + "avg_state": 2, + + # Boundary Conditions + "bc_x%beg": -3, + "bc_x%end": -3, + "bc_y%beg": -3, + "bc_y%end": -3, + + # Output + "format": 1, + "precision": 2, + "prim_vars_wrt": "T", + "parallel_io": "T", + + # Patch 1: Background + "patch_icpp(1)%geometry": 3, # Rectangle + "patch_icpp(1)%x_centroid": (x_start + x_end) / 2, + "patch_icpp(1)%y_centroid": (y_start + y_end) / 2, + "patch_icpp(1)%length_x": x_end - x_start, + "patch_icpp(1)%length_y": y_end - y_start, + "patch_icpp(1)%vel(1)": vel_x_bg, + "patch_icpp(1)%vel(2)": vel_y_bg, + "patch_icpp(1)%pres": pres_bg, + "patch_icpp(1)%alpha_rho(1)": rho_bg, + "patch_icpp(1)%alpha(1)": 1.0, + + # Patch 2: Circular perturbation + "patch_icpp(2)%geometry": 2, # Circle + "patch_icpp(2)%x_centroid": x_center, + "patch_icpp(2)%y_centroid": y_center, + "patch_icpp(2)%radius": radius, + "patch_icpp(2)%alter_patch(1)": "T", + "patch_icpp(2)%vel(1)": vel_x_bg, + "patch_icpp(2)%vel(2)": vel_y_bg, + "patch_icpp(2)%pres": pres_pert, + "patch_icpp(2)%alpha_rho(1)": rho_pert, + "patch_icpp(2)%alpha(1)": 1.0, + + # Fluid Properties + "fluid_pp(1)%gamma": 1.0 / (gamma - 1.0), + "fluid_pp(1)%pi_inf": 0.0, +})) +''', + + '3D_minimal': '''\ +#!/usr/bin/env python3 +""" +3D Minimal Case Template +------------------------ +A minimal 3D case with a spherical perturbation. 
+ +Usage: + ./mfc.sh run case.py -N 2 -n 4 # Run on 2 nodes with 4 tasks each +""" +import math +import json + +# ============================================================================= +# SIMULATION PARAMETERS - Modify these for your case +# ============================================================================= + +# Grid resolution (keep low for testing, increase for production) +Nx = 49 # Cells in x-direction +Ny = 49 # Cells in y-direction +Nz = 49 # Cells in z-direction + +# Domain size +x_start, x_end = 0.0, 1.0 +y_start, y_end = 0.0, 1.0 +z_start, z_end = 0.0, 1.0 + +# Time stepping +dt = 1.0e-6 +Nt = 100 # Keep low for testing + +# Background state +rho_bg = 1.0 +pres_bg = 1.0e5 + +# Spherical perturbation +x_center = 0.5 +y_center = 0.5 +z_center = 0.5 +radius = 0.1 +rho_pert = 2.0 +pres_pert = 2.0e5 + +# Fluid properties +gamma = 1.4 + +# ============================================================================= +# CASE DICTIONARY - MFC configuration +# ============================================================================= +print(json.dumps({ + # Logistics + "run_time_info": "T", + + # Computational Domain + "x_domain%beg": x_start, + "x_domain%end": x_end, + "y_domain%beg": y_start, + "y_domain%end": y_end, + "z_domain%beg": z_start, + "z_domain%end": z_end, + "m": Nx, + "n": Ny, + "p": Nz, + "dt": dt, + "t_step_start": 0, + "t_step_stop": Nt, + "t_step_save": max(1, Nt // 10), + + # Simulation Algorithm + "num_patches": 2, + "model_eqns": 2, + "num_fluids": 1, + "time_stepper": 3, + "weno_order": 5, + "weno_eps": 1.0e-16, + "mapped_weno": "T", + "riemann_solver": 2, + "wave_speeds": 1, + "avg_state": 2, + + # Boundary Conditions + "bc_x%beg": -3, + "bc_x%end": -3, + "bc_y%beg": -3, + "bc_y%end": -3, + "bc_z%beg": -3, + "bc_z%end": -3, + + # Output + "format": 1, + "precision": 2, + "prim_vars_wrt": "T", + "parallel_io": "T", + + # Patch 1: Background (cube) + "patch_icpp(1)%geometry": 9, + "patch_icpp(1)%x_centroid": (x_start + x_end) / 2, + "patch_icpp(1)%y_centroid": (y_start + y_end) / 2, + "patch_icpp(1)%z_centroid": (z_start + z_end) / 2, + "patch_icpp(1)%length_x": x_end - x_start, + "patch_icpp(1)%length_y": y_end - y_start, + "patch_icpp(1)%length_z": z_end - z_start, + "patch_icpp(1)%vel(1)": 0.0, + "patch_icpp(1)%vel(2)": 0.0, + "patch_icpp(1)%vel(3)": 0.0, + "patch_icpp(1)%pres": pres_bg, + "patch_icpp(1)%alpha_rho(1)": rho_bg, + "patch_icpp(1)%alpha(1)": 1.0, + + # Patch 2: Spherical perturbation + "patch_icpp(2)%geometry": 8, # Sphere + "patch_icpp(2)%x_centroid": x_center, + "patch_icpp(2)%y_centroid": y_center, + "patch_icpp(2)%z_centroid": z_center, + "patch_icpp(2)%radius": radius, + "patch_icpp(2)%alter_patch(1)": "T", + "patch_icpp(2)%vel(1)": 0.0, + "patch_icpp(2)%vel(2)": 0.0, + "patch_icpp(2)%vel(3)": 0.0, + "patch_icpp(2)%pres": pres_pert, + "patch_icpp(2)%alpha_rho(1)": rho_pert, + "patch_icpp(2)%alpha(1)": 1.0, + + # Fluid Properties + "fluid_pp(1)%gamma": 1.0 / (gamma - 1.0), + "fluid_pp(1)%pi_inf": 0.0, +})) +''', +} + + +def get_available_templates(): + """Get list of available templates (built-in + examples).""" + templates = list(BUILTIN_TEMPLATES.keys()) + + # Add examples as templates + if os.path.isdir(MFC_EXAMPLE_DIRPATH): + for name in sorted(os.listdir(MFC_EXAMPLE_DIRPATH)): + example_path = os.path.join(MFC_EXAMPLE_DIRPATH, name) + if os.path.isdir(example_path) and os.path.isfile(os.path.join(example_path, 'case.py')): + templates.append(f"example:{name}") + + return templates + + +def list_templates(): + """Print 
available templates.""" + cons.print("[bold]Available Templates[/bold]\n") + + cons.print(" [bold cyan]Built-in Templates:[/bold cyan]") + for name in sorted(BUILTIN_TEMPLATES.keys()): + desc = { + '1D_minimal': 'Minimal 1D shock tube case', + '2D_minimal': 'Minimal 2D case with circular perturbation', + '3D_minimal': 'Minimal 3D case with spherical perturbation', + }.get(name, '') + cons.print(f" [green]{name:20s}[/green] {desc}") + + cons.print() + cons.print(" [bold cyan]From Examples:[/bold cyan]") + + if os.path.isdir(MFC_EXAMPLE_DIRPATH): + examples = [] + for name in sorted(os.listdir(MFC_EXAMPLE_DIRPATH)): + example_path = os.path.join(MFC_EXAMPLE_DIRPATH, name) + if os.path.isdir(example_path) and os.path.isfile(os.path.join(example_path, 'case.py')): + examples.append(name) + + # Group by dimension + for dim in ['0D', '1D', '2D', '3D']: + dim_examples = [e for e in examples if e.startswith(dim)] + if dim_examples: + cons.print(f" [dim]{dim}:[/dim] {', '.join(dim_examples[:5])}", end='') + if len(dim_examples) > 5: + cons.print(f" [dim]... (+{len(dim_examples) - 5} more)[/dim]") + else: + cons.print() + + cons.print() + cons.print(" [bold]Usage:[/bold]") + cons.print(" ./mfc.sh new my_case # Use default 1D template") + cons.print(" ./mfc.sh new my_case --template 2D_minimal # Use 2D template") + cons.print(" ./mfc.sh new my_case --template example:1D_sodshocktube # Copy from example") + cons.print() + + +def create_case(name: str, template: str): + """Create a new case from a template.""" + # Determine output directory + output_dir = os.path.abspath(name) + + if os.path.exists(output_dir): + raise MFCException(f"Directory already exists: {output_dir}") + + # Check if it's a built-in template + if template in BUILTIN_TEMPLATES: + os.makedirs(output_dir, exist_ok=True) + case_path = os.path.join(output_dir, 'case.py') + + with open(case_path, 'w') as f: + f.write(BUILTIN_TEMPLATES[template]) + + os.chmod(case_path, 0o755) # Make executable + + cons.print(f"[bold green]Created[/bold green] {output_dir}/") + cons.print(f" Using template: [cyan]{template}[/cyan]") + cons.print() + cons.print(" [bold]Next steps:[/bold]") + cons.print(f" 1. Edit [cyan]{name}/case.py[/cyan] to configure your simulation") + cons.print(f" 2. Run: [cyan]./mfc.sh run {name}/case.py[/cyan]") + cons.print() + + # Check if it's an example template + elif template.startswith('example:'): + example_name = template[8:] # Remove 'example:' prefix + example_path = os.path.join(MFC_EXAMPLE_DIRPATH, example_name) + + if not os.path.isdir(example_path): + raise MFCException(f"Example not found: {example_name}") + + # Copy the example directory + shutil.copytree(example_path, output_dir) + + cons.print(f"[bold green]Created[/bold green] {output_dir}/") + cons.print(f" Copied from example: [cyan]{example_name}[/cyan]") + cons.print() + cons.print(" [bold]Next steps:[/bold]") + cons.print(f" 1. Review and modify [cyan]{name}/case.py[/cyan]") + cons.print(f" 2. Run: [cyan]./mfc.sh run {name}/case.py[/cyan]") + cons.print() + + else: + available = ', '.join(list(BUILTIN_TEMPLATES.keys())[:3]) + raise MFCException( + f"Unknown template: {template}\n" + f"Available built-in templates: {available}\n" + f"Or use 'example:' to copy from examples.\n" + f"Run './mfc.sh new --list' to see all available templates." 
+ ) + + +def init(): + """Main entry point for the init command.""" + if ARG("list"): + list_templates() + return + + name = ARG("name") + template = ARG("template") + + if not name: + # Show full help like ./mfc.sh new -h + # pylint: disable=import-outside-toplevel + import sys + from .user_guide import print_command_help + from .cli.commands import MFC_CLI_SCHEMA + from .cli.argparse_gen import generate_parser + from .state import MFCConfig + + print_command_help("new", show_argparse=False) + _, subparser_map = generate_parser(MFC_CLI_SCHEMA, MFCConfig()) + subparser_map["new"].print_help() + sys.stdout.flush() + sys.stderr.write("\n./mfc.sh new: error: the following arguments are required: NAME\n") + sys.exit(2) + + create_case(name, template) diff --git a/toolchain/mfc/params/__init__.py b/toolchain/mfc/params/__init__.py new file mode 100644 index 0000000000..079f04768b --- /dev/null +++ b/toolchain/mfc/params/__init__.py @@ -0,0 +1,29 @@ +""" +MFC Parameter Schema Package (Minimal). + +Single source of truth for MFC's ~3,300 case parameters. + +Import Order +------------ +The imports below follow a specific order: +1. REGISTRY is imported first (empty at this point) +2. Schema classes (ParamDef, ParamType) for type definitions +3. definitions module is imported LAST to populate and freeze REGISTRY + +The definitions import is a side-effect import that registers all ~3,300 +parameters with REGISTRY and then freezes it. This must happen at package +import time so that any code importing from this package gets a fully +populated, immutable registry. + +After initialization, REGISTRY.is_frozen is True and any attempt to +register new parameters will raise RegistryFrozenError. +""" + +from .registry import REGISTRY, RegistryFrozenError +from .schema import ParamDef, ParamType + +# IMPORTANT: This import populates REGISTRY with all parameter definitions +# and freezes it. It must come after REGISTRY is imported and must not be removed. +from . import definitions # noqa: F401 pylint: disable=unused-import + +__all__ = ['REGISTRY', 'RegistryFrozenError', 'ParamDef', 'ParamType'] diff --git a/toolchain/mfc/params/definitions.py b/toolchain/mfc/params/definitions.py new file mode 100644 index 0000000000..f3807dd451 --- /dev/null +++ b/toolchain/mfc/params/definitions.py @@ -0,0 +1,887 @@ +""" +MFC Parameter Definitions (Compact). + +Single file containing all ~3,300 parameter definitions using loops. +This replaces the definitions/ directory. +""" + +import re +from typing import Dict, Any +from .schema import ParamDef, ParamType +from .registry import REGISTRY + +# Index limits +NP, NF, NI, NA, NPR, NB = 10, 10, 10, 4, 10, 10 # patches, fluids, ibs, acoustic, probes, bc_patches + + +# ============================================================================= +# Auto-generated Descriptions +# ============================================================================= +# Descriptions are auto-generated from parameter names using naming conventions. +# Override with explicit desc= parameter when auto-generation is inadequate. 
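+#
+# For example, given the tables below, _auto_describe yields:
+#   "patch_icpp(2)%radius" -> "Radius for initial condition patch 2"
+#   "fluid_pp(1)%gamma"    -> "Specific heat ratio for fluid 1"
+#   "bc_x%pres_in"         -> "Inlet pressure for bc x"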
+ +# Prefix descriptions for indexed parameter families +_PREFIX_DESCS = { + "patch_icpp": "initial condition patch", + "patch_ib": "immersed boundary", + "patch_bc": "boundary condition patch", + "fluid_pp": "fluid", + "acoustic": "acoustic source", + "probe": "probe", + "integral": "integral region", +} + +# Attribute descriptions (suffix after %) +_ATTR_DESCS = { + # Geometry/position + "geometry": "Geometry type", + "x_centroid": "X-coordinate of centroid", + "y_centroid": "Y-coordinate of centroid", + "z_centroid": "Z-coordinate of centroid", + "length_x": "X-dimension length", + "length_y": "Y-dimension length", + "length_z": "Z-dimension length", + "radius": "Radius", + "radii": "Radii array", + "normal": "Normal direction", + "theta": "Theta angle", + "angles": "Orientation angles", + # Physics + "vel": "Velocity", + "pres": "Pressure", + "rho": "Density", + "alpha": "Volume fraction", + "alpha_rho": "Partial density", + "gamma": "Specific heat ratio", + "pi_inf": "Stiffness pressure", + "cv": "Specific heat (const. volume)", + "qv": "Heat of formation", + "qvp": "Heat of formation prime", + "G": "Shear modulus", + "Re": "Reynolds number", + "mul0": "Reference viscosity", + "ss": "Surface tension", + "pv": "Vapor pressure", + # MHD + "Bx": "Magnetic field (x-component)", + "By": "Magnetic field (y-component)", + "Bz": "Magnetic field (z-component)", + # Model/smoothing + "smoothen": "Enable smoothing", + "smooth_patch_id": "Patch ID to smooth against", + "smooth_coeff": "Smoothing coefficient", + "alter_patch": "Alter with another patch", + "model_filepath": "STL model file path", + "model_spc": "Model spacing", + "model_threshold": "Model threshold", + "model_translate": "Model translation", + "model_scale": "Model scale", + "model_rotate": "Model rotation", + # Bubbles + "r0": "Initial bubble radius", + "v0": "Initial bubble velocity", + "p0": "Initial bubble pressure", + "m0": "Initial bubble mass", + # IB specific + "slip": "Enable slip condition", + "moving_ibm": "Enable moving boundary", + "angular_vel": "Angular velocity", + "mass": "Mass", + # BC specific + "vel_in": "Inlet velocity", + "vel_out": "Outlet velocity", + "alpha_rho_in": "Inlet partial density", + "alpha_in": "Inlet volume fraction", + "pres_in": "Inlet pressure", + "pres_out": "Outlet pressure", + "grcbc_in": "Enable GRCBC inlet", + "grcbc_out": "Enable GRCBC outlet", + "grcbc_vel_out": "Enable GRCBC velocity outlet", + # Acoustic + "loc": "Location", + "mag": "Magnitude", + "pulse": "Pulse type", + "support": "Support type", + "frequency": "Frequency", + "wavelength": "Wavelength", + "length": "Length", + "height": "Height", + "delay": "Delay time", + "dipole": "Enable dipole", + "dir": "Direction", + # Output + "x": "X-coordinate", + "y": "Y-coordinate", + "z": "Z-coordinate", + "xmin": "X minimum", + "xmax": "X maximum", + "ymin": "Y minimum", + "ymax": "Y maximum", + "zmin": "Z minimum", + "zmax": "Z maximum", + # Chemistry + "Y": "Species mass fraction", + # Shape coefficients + "a": "Shape coefficient", + # Elasticity + "tau_e": "Elastic stress component", + # Misc + "cf_val": "Color function value", + "hcid": "Hard-coded ID", + "epsilon": "Interface thickness", + "beta": "Shape parameter beta", + "non_axis_sym": "Non-axisymmetric parameter", +} + +# Simple parameter descriptions (non-indexed) +_SIMPLE_DESCS = { + # Grid + "m": "Grid cells in x-direction", + "n": "Grid cells in y-direction", + "p": "Grid cells in z-direction", + "cyl_coord": "Enable cylindrical coordinates", + "stretch_x": "Enable grid 
stretching in x", + "stretch_y": "Enable grid stretching in y", + "stretch_z": "Enable grid stretching in z", + "a_x": "Grid stretching rate in x", + "a_y": "Grid stretching rate in y", + "a_z": "Grid stretching rate in z", + "x_a": "Stretching start (negative x)", + "x_b": "Stretching start (positive x)", + "y_a": "Stretching start (negative y)", + "y_b": "Stretching start (positive y)", + "z_a": "Stretching start (negative z)", + "z_b": "Stretching start (positive z)", + "loops_x": "Stretching iterations in x", + "loops_y": "Stretching iterations in y", + "loops_z": "Stretching iterations in z", + # Time + "dt": "Time step size", + "t_step_start": "Starting time step", + "t_step_stop": "Ending time step", + "t_step_save": "Save interval (steps)", + "t_step_print": "Print interval (steps)", + "t_stop": "Stop time", + "t_save": "Save interval (time)", + "time_stepper": "Time integration scheme (1=Euler, 2=RK2, 3=RK3)", + "cfl_target": "Target CFL number", + "cfl_max": "Maximum CFL number", + "cfl_adap_dt": "Enable adaptive CFL time stepping", + "cfl_const_dt": "Use constant CFL time stepping", + "cfl_dt": "Enable CFL-based time stepping", + "adap_dt": "Enable adaptive time stepping", + "adap_dt_tol": "Adaptive time stepping tolerance", + "adap_dt_max_iters": "Max iterations for adaptive dt", + "t_tol": "Time tolerance", + # Model + "model_eqns": "Model equations (1=gamma, 2=5-eq, 3=6-eq, 4=4-eq)", + "num_fluids": "Number of fluids", + "num_patches": "Number of IC patches", + "mpp_lim": "Mixture pressure positivity limiter", + # WENO + "weno_order": "WENO reconstruction order", + "weno_eps": "WENO epsilon parameter", + "mapped_weno": "Enable mapped WENO", + "wenoz": "Enable WENO-Z", + "teno": "Enable TENO", + "mp_weno": "Enable monotonicity-preserving WENO", + # Riemann + "riemann_solver": "Riemann solver (1=HLL, 2=HLLC, 3=exact)", + "wave_speeds": "Wave speed estimate method", + "avg_state": "Average state (1=Roe, 2=arithmetic)", + # Physics toggles + "viscous": "Enable viscous effects", + "mhd": "Enable magnetohydrodynamics", + "hyper_cleaning": "Enable hyperbolic divergence cleaning", + "hyper_cleaning_speed": "Divergence cleaning wave speed", + "hyper_cleaning_tau": "Divergence cleaning damping time", + "powell": "Enable Powell source terms for MHD", + "bubbles_euler": "Enable Euler bubble model", + "bubbles_lagrange": "Enable Lagrangian bubbles", + "polytropic": "Enable polytropic gas", + "polydisperse": "Enable polydisperse bubbles", + "qbmm": "Enable QBMM", + "chemistry": "Enable chemistry", + "surface_tension": "Enable surface tension", + "hypoelasticity": "Enable hypoelastic model", + "hyperelasticity": "Enable hyperelastic model", + "relativity": "Enable special relativity", + "ib": "Enable immersed boundaries", + "acoustic_source": "Enable acoustic sources", + # Output + "parallel_io": "Enable parallel I/O", + "probe_wrt": "Write probe data", + "prim_vars_wrt": "Write primitive variables", + "cons_vars_wrt": "Write conservative variables", + "run_time_info": "Print runtime info", + # Misc + "case_dir": "Case directory path", + "cantera_file": "Cantera mechanism file", + "num_ibs": "Number of immersed boundaries", + "num_source": "Number of acoustic sources", + "num_probes": "Number of probes", + "num_integrals": "Number of integral regions", + "nb": "Number of bubble bins", + "R0ref": "Reference bubble radius", + "sigma": "Surface tension coefficient", + "Bx0": "Background magnetic field (x)", + "old_grid": "Load grid from previous simulation", + "old_ic": "Load initial 
conditions from previous", + "t_step_old": "Time step to restart from", + "fd_order": "Finite difference order", + "recon_type": "Reconstruction type (1=WENO, 2=MUSCL)", + "muscl_order": "MUSCL reconstruction order", + "muscl_lim": "MUSCL limiter type", + "low_Mach": "Low Mach number correction", + "bubble_model": "Bubble dynamics model", + "Ca": "Cavitation number", + "Web": "Weber number", + "Re_inv": "Inverse Reynolds number", + "format": "Output format (1=Silo, 2=binary)", + "precision": "Output precision (1=single, 2=double)", + # Body forces + "bf_x": "Enable body force in x", + "bf_y": "Enable body force in y", + "bf_z": "Enable body force in z", + "k_x": "Body force wavenumber in x", + "k_y": "Body force wavenumber in y", + "k_z": "Body force wavenumber in z", + "w_x": "Body force frequency in x", + "w_y": "Body force frequency in y", + "w_z": "Body force frequency in z", + "p_x": "Body force phase in x", + "p_y": "Body force phase in y", + "p_z": "Body force phase in z", + "g_x": "Gravitational acceleration in x", + "g_y": "Gravitational acceleration in y", + "g_z": "Gravitational acceleration in z", + # More output + "E_wrt": "Write energy field", + "c_wrt": "Write sound speed field", + "rho_wrt": "Write density field", + "pres_wrt": "Write pressure field", + "schlieren_wrt": "Write schlieren images", + "cf_wrt": "Write color function", + "omega_wrt": "Write vorticity", + "qm_wrt": "Write Q-criterion", + "liutex_wrt": "Write Liutex vortex field", + "gamma_wrt": "Write gamma field", + "heat_ratio_wrt": "Write heat capacity ratio", + "pi_inf_wrt": "Write pi_inf field", + "pres_inf_wrt": "Write reference pressure", + "fft_wrt": "Write FFT output", + "kappa_wrt": "Write curvature field", + "chem_wrt_T": "Write temperature (chemistry)", + # Misc physics + "alt_soundspeed": "Alternative sound speed formulation", + "mixture_err": "Enable mixture error checking", + "cont_damage": "Enable continuum damage model", +} + + +def _auto_describe(name: str) -> str: + """Auto-generate description from parameter name.""" + # Check simple params first + if name in _SIMPLE_DESCS: + return _SIMPLE_DESCS[name] + + # Handle indexed params: prefix(N)%attr or prefix(N)%attr(M) + match = re.match(r"([a-z_]+)\((\d+)\)%(.+)", name) + if match: + prefix, idx, attr = match.group(1), match.group(2), match.group(3) + prefix_desc = _PREFIX_DESCS.get(prefix, prefix.replace("_", " ")) + + # Check for nested index: attr(M) or attr(M, K) + attr_match = re.match(r"([a-z_]+)\((\d+)(?:,\s*(\d+))?\)", attr) + if attr_match: + attr_base = attr_match.group(1) + idx2 = attr_match.group(2) + attr_desc = _ATTR_DESCS.get(attr_base, attr_base.replace("_", " ")) + return f"{attr_desc} {idx2} for {prefix_desc} {idx}" + + attr_desc = _ATTR_DESCS.get(attr, attr.replace("_", " ")) + return f"{attr_desc} for {prefix_desc} {idx}" + + # Handle bc_x%attr style (no index in prefix) + if "%" in name: + prefix, attr = name.split("%", 1) + # Check for indexed attr + attr_match = re.match(r"([a-z_]+)\((\d+)\)", attr) + if attr_match: + attr_base, idx = attr_match.group(1), attr_match.group(2) + attr_desc = _ATTR_DESCS.get(attr_base, attr_base.replace("_", " ")) + return f"{attr_desc} {idx} for {prefix.replace('_', ' ')}" + + attr_desc = _ATTR_DESCS.get(attr, "") + if attr_desc: + return f"{attr_desc} for {prefix.replace('_', ' ')}" + # Fallback: just clean up the name + return f"{attr.replace('_', ' ').title()} for {prefix.replace('_', ' ')}" + + # Handle suffix-indexed: name(N) or name(N, M) + match = 
re.match(r"([a-z_]+)\((\d+)(?:,\s*(\d+))?\)", name) + if match: + base, idx = match.group(1), match.group(2) + # Handle _wrt patterns + if base.endswith("_wrt"): + field = base[:-4].replace("_", " ") + return f"Write {field} for component {idx}" + return f"{base.replace('_', ' ').title()} {idx}" + + # Fallback patterns + if name.endswith("_wrt"): + return f"Write {name[:-4].replace('_', ' ')}" + if name.startswith("num_"): + return f"Number of {name[4:].replace('_', ' ')}" + + # Last resort: clean up the name + return name.replace("_", " ").replace("%", " ") + +# Parameters that can be hard-coded for GPU case optimization +CASE_OPT_PARAMS = { + "mapped_weno", "wenoz", "teno", "wenoz_q", "nb", "weno_order", + "num_fluids", "mhd", "relativity", "igr_order", "viscous", + "igr_iter_solver", "igr", "igr_pres_lim", "recon_type", + "muscl_order", "muscl_lim" +} + + +# ============================================================================ +# Schema Validation for Constraints and Dependencies +# ============================================================================ +# Uses rapidfuzz for "did you mean?" suggestions when typos are detected + +_VALID_CONSTRAINT_KEYS = {"choices", "min", "max"} +_VALID_DEPENDENCY_KEYS = {"when_true", "when_set"} +_VALID_CONDITION_KEYS = {"requires", "recommends"} + + +def _validate_constraint(param_name: str, constraint: Dict[str, Any]) -> None: + """Validate a constraint dict has valid keys with 'did you mean?' suggestions.""" + # Import here to avoid circular import at module load time + from .suggest import invalid_key_error # pylint: disable=import-outside-toplevel + + invalid_keys = set(constraint.keys()) - _VALID_CONSTRAINT_KEYS + if invalid_keys: + # Get suggestion for the first invalid key + first_invalid = next(iter(invalid_keys)) + raise ValueError( + invalid_key_error( + f"constraint for '{param_name}'", + first_invalid, + _VALID_CONSTRAINT_KEYS + ) + ) + + # Validate types + if "choices" in constraint and not isinstance(constraint["choices"], list): + raise ValueError(f"Constraint 'choices' for '{param_name}' must be a list") + if "min" in constraint and not isinstance(constraint["min"], (int, float)): + raise ValueError(f"Constraint 'min' for '{param_name}' must be a number") + if "max" in constraint and not isinstance(constraint["max"], (int, float)): + raise ValueError(f"Constraint 'max' for '{param_name}' must be a number") + + +def _validate_dependency(param_name: str, dependency: Dict[str, Any]) -> None: + """Validate a dependency dict has valid structure with 'did you mean?' 
suggestions.""" + # Import here to avoid circular import at module load time + from .suggest import invalid_key_error # pylint: disable=import-outside-toplevel + + invalid_keys = set(dependency.keys()) - _VALID_DEPENDENCY_KEYS + if invalid_keys: + first_invalid = next(iter(invalid_keys)) + raise ValueError( + invalid_key_error( + f"dependency for '{param_name}'", + first_invalid, + _VALID_DEPENDENCY_KEYS + ) + ) + + for condition_key in ["when_true", "when_set"]: + if condition_key in dependency: + condition = dependency[condition_key] + if not isinstance(condition, dict): + raise ValueError( + f"Dependency '{condition_key}' for '{param_name}' must be a dict" + ) + invalid_cond_keys = set(condition.keys()) - _VALID_CONDITION_KEYS + if invalid_cond_keys: + first_invalid = next(iter(invalid_cond_keys)) + raise ValueError( + invalid_key_error( + f"condition in '{condition_key}' for '{param_name}'", + first_invalid, + _VALID_CONDITION_KEYS + ) + ) + for req_key in ["requires", "recommends"]: + if req_key in condition and not isinstance(condition[req_key], list): + raise ValueError( + f"Dependency '{condition_key}/{req_key}' for '{param_name}' " + "must be a list" + ) + + +def _validate_all_constraints(constraints: Dict[str, Dict]) -> None: + """Validate all constraint definitions.""" + for param_name, constraint in constraints.items(): + _validate_constraint(param_name, constraint) + + +def _validate_all_dependencies(dependencies: Dict[str, Dict]) -> None: + """Validate all dependency definitions.""" + for param_name, dependency in dependencies.items(): + _validate_dependency(param_name, dependency) + + +# Parameter constraints (choices, min, max) +CONSTRAINTS = { + # Reconstruction + "weno_order": {"choices": [0, 1, 3, 5, 7]}, # 0 for MUSCL mode + "recon_type": {"choices": [1, 2]}, # 1=WENO, 2=MUSCL + "muscl_order": {"choices": [1, 2]}, + "muscl_lim": {"choices": [1, 2, 3, 4, 5]}, # minmod, MC, Van Albada, Van Leer, SUPERBEE + + # Time stepping + "time_stepper": {"choices": [1, 2, 3]}, # 1=Euler, 2=TVD-RK2, 3=TVD-RK3 + + # Riemann solver + "riemann_solver": {"choices": [1, 2, 3, 4, 5]}, # HLL, HLLC, Exact, HLLD, LF + "wave_speeds": {"choices": [1, 2]}, # direct, pressure + "avg_state": {"choices": [1, 2]}, # Roe, arithmetic + + # Model equations + "model_eqns": {"choices": [1, 2, 3, 4]}, # gamma-law, 5-eq, 6-eq, 4-eq + + # Bubbles + "bubble_model": {"choices": [1, 2, 3]}, # Gilmore, Keller-Miksis, RP + + # Output + "format": {"choices": [1, 2]}, # Silo, binary + "precision": {"choices": [1, 2]}, # single, double + + # Counts (must be positive) + "num_fluids": {"min": 1, "max": 10}, + "num_patches": {"min": 0, "max": 10}, + "num_ibs": {"min": 0, "max": 10}, + "m": {"min": 0}, + "n": {"min": 0}, + "p": {"min": 0}, +} + +# Parameter dependencies (requires, recommends) +DEPENDENCIES = { + "bubbles_euler": { + "when_true": { + "recommends": ["nb", "polytropic"], + } + }, + "viscous": { + "when_true": { + "recommends": ["fluid_pp(1)%Re(1)"], + } + }, + "polydisperse": { + "when_true": { + "requires": ["nb"], + } + }, + "chemistry": { + "when_true": { + "requires": ["cantera_file"], + } + }, + "qbmm": { + "when_true": { + "recommends": ["bubbles_euler"], + } + }, + "ib": { + "when_true": { + "requires": ["num_ibs"], + } + }, + "acoustic_source": { + "when_true": { + "requires": ["num_source"], + } + }, + "probe_wrt": { + "when_true": { + "requires": ["num_probes"], + } + }, +} + +def _r(name, ptype, tags=None, desc=None): + """Register a parameter with optional feature tags and description.""" + 
REGISTRY.register(ParamDef( + name=name, + param_type=ptype, + description=desc if desc else _auto_describe(name), + case_optimization=(name in CASE_OPT_PARAMS), + constraints=CONSTRAINTS.get(name), + dependencies=DEPENDENCIES.get(name), + tags=tags if tags else set(), + )) + + +def _load(): # pylint: disable=too-many-locals,too-many-statements + """Load all parameter definitions.""" + INT, REAL, LOG, STR = ParamType.INT, ParamType.REAL, ParamType.LOG, ParamType.STR + A_REAL = ParamType.ANALYTIC_REAL + + # ========================================================================== + # SIMPLE PARAMETERS (non-indexed) + # ========================================================================== + + # --- Grid --- + for n in ["m", "n", "p"]: + _r(n, INT, {"grid"}) + _r("cyl_coord", LOG, {"grid"}) + for n in ["stretch_x", "stretch_y", "stretch_z"]: + _r(n, LOG, {"grid"}) + for d in ["x", "y", "z"]: + _r(f"{d}_a", REAL, {"grid"}) + _r(f"{d}_b", REAL, {"grid"}) + _r(f"a_{d}", REAL, {"grid"}) + _r(f"loops_{d}", INT, {"grid"}) + _r(f"{d}_domain%beg", REAL, {"grid"}) + _r(f"{d}_domain%end", REAL, {"grid"}) + + # --- Time stepping --- + for n in ["time_stepper", "t_step_old", "t_step_start", "t_step_stop", + "t_step_save", "t_step_print", "adap_dt_max_iters"]: + _r(n, INT, {"time"}) + for n in ["dt", "cfl_target", "cfl_max", "t_tol", "adap_dt_tol", "t_stop", "t_save"]: + _r(n, REAL, {"time"}) + for n in ["cfl_adap_dt", "cfl_const_dt", "cfl_dt", "adap_dt"]: + _r(n, LOG, {"time"}) + + # --- WENO/reconstruction --- + _r("weno_order", INT, {"weno"}) + _r("recon_type", INT) + _r("muscl_order", INT) + _r("muscl_lim", INT) + for n in ["weno_eps", "teno_CT", "wenoz_q"]: + _r(n, REAL, {"weno"}) + for n in ["mapped_weno", "wenoz", "teno", "weno_avg", "mp_weno", "null_weights"]: + _r(n, LOG, {"weno"}) + _r("weno_Re_flux", LOG, {"weno", "viscosity"}) + + # --- Riemann solver --- + for n in ["riemann_solver", "wave_speeds", "avg_state", "low_Mach"]: + _r(n, INT, {"riemann"}) + + # --- MHD --- + _r("Bx0", REAL, {"mhd"}) + for n in ["hyper_cleaning_speed", "hyper_cleaning_tau"]: + _r(n, REAL, {"mhd"}) + for n in ["mhd", "hyper_cleaning", "powell"]: + _r(n, LOG, {"mhd"}) + + # --- Bubbles --- + for n in ["R0ref", "nb", "Web", "Ca"]: + _r(n, REAL, {"bubbles"}) + _r("Re_inv", REAL, {"bubbles", "viscosity"}) + _r("bubble_model", INT, {"bubbles"}) + for n in ["polytropic", "bubbles_euler", "polydisperse", "qbmm", "bubbles_lagrange"]: + _r(n, LOG, {"bubbles"}) + + # --- Viscosity --- + _r("viscous", LOG, {"viscosity"}) + + # --- Elasticity --- + for n in ["hypoelasticity", "hyperelasticity"]: + _r(n, LOG, {"elasticity"}) + + # --- Surface tension --- + _r("sigma", REAL, {"surface_tension"}) + _r("surface_tension", LOG, {"surface_tension"}) + + # --- Chemistry --- + _r("cantera_file", STR, {"chemistry"}) + _r("chemistry", LOG, {"chemistry"}) + + # --- Acoustic --- + _r("num_source", INT, {"acoustic"}) + _r("acoustic_source", LOG, {"acoustic"}) + + # --- Immersed boundary --- + _r("num_ibs", INT, {"ib"}) + _r("ib", LOG, {"ib"}) + + # --- Probes --- + for n in ["num_probes", "num_integrals"]: + _r(n, INT, {"probes"}) + _r("probe_wrt", LOG, {"output", "probes"}) + _r("integral_wrt", LOG, {"output", "probes"}) + + # --- Output --- + _r("precision", INT, {"output"}) + _r("format", INT, {"output"}) + _r("schlieren_alpha", REAL, {"output"}) + for n in ["parallel_io", "file_per_process", "run_time_info", "prim_vars_wrt", + "cons_vars_wrt", "fft_wrt"]: + _r(n, LOG, {"output"}) + for n in ["schlieren_wrt", "alpha_rho_wrt", 
"rho_wrt", "mom_wrt", "vel_wrt", + "flux_wrt", "E_wrt", "pres_wrt", "alpha_wrt", "kappa_wrt", "gamma_wrt", + "heat_ratio_wrt", "pi_inf_wrt", "pres_inf_wrt", "c_wrt", + "omega_wrt", "qm_wrt", "liutex_wrt", "cf_wrt", "sim_data", "output_partial_domain"]: + _r(n, LOG, {"output"}) + for d in ["x", "y", "z"]: + _r(f"{d}_output%beg", REAL, {"output"}) + _r(f"{d}_output%end", REAL, {"output"}) + # Lagrangian output + for v in ["lag_header", "lag_txt_wrt", "lag_db_wrt", "lag_id_wrt", "lag_pos_wrt", + "lag_pos_prev_wrt", "lag_vel_wrt", "lag_rad_wrt", "lag_rvel_wrt", + "lag_r0_wrt", "lag_rmax_wrt", "lag_rmin_wrt", "lag_dphidt_wrt", + "lag_pres_wrt", "lag_mv_wrt", "lag_mg_wrt", "lag_betaT_wrt", "lag_betaC_wrt"]: + _r(v, LOG, {"bubbles", "output"}) + + # --- Boundary conditions --- + for d in ["x", "y", "z"]: + _r(f"bc_{d}%beg", INT, {"bc"}) + _r(f"bc_{d}%end", INT, {"bc"}) + + # --- Relativity --- + _r("relativity", LOG, {"relativity"}) + + # --- Other (no specific feature tag) --- + for n in ["model_eqns", "num_fluids", "thermal", "relax_model", "igr_order", + "num_bc_patches", "num_patches", "perturb_flow_fluid", "perturb_sph_fluid", + "dist_type", "mixlayer_perturb_nk", "elliptic_smoothing_iters", + "n_start_old", "n_start", "fd_order", "num_igr_iters", + "num_igr_warm_start_iters", "igr_iter_solver", "nv_uvm_igr_temps_on_gpu", + "flux_lim"]: + _r(n, INT) + for n in ["pref", "poly_sigma", "rhoref", "mixlayer_vel_coef", "mixlayer_domain", + "mixlayer_perturb_k0", "perturb_flow_mag", "fluid_rho", "sigR", "sigV", + "rhoRV", "palpha_eps", "ptgalpha_eps", "pi_fac", "tau_star", + "cont_damage_s", "alpha_bar", "alf_factor", "ic_eps", "ic_beta"]: + _r(n, REAL) + for n in ["mpp_lim", "relax", "adv_n", "cont_damage", "igr", "down_sample", + "old_grid", "old_ic", "mixlayer_vel_profile", "mixlayer_perturb", + "perturb_flow", "perturb_sph", "pre_stress", "elliptic_smoothing", + "simplex_perturb", "alt_soundspeed", "mixture_err", "rdma_mpi", + "igr_pres_lim", "int_comp", "nv_uvm_out_of_core", "nv_uvm_pref_gpu"]: + _r(n, LOG) + _r("case_dir", STR) + + # Body force + for d in ["x", "y", "z"]: + for v in ["k", "w", "p", "g"]: + _r(f"{v}_{d}", REAL) + _r(f"bf_{d}", LOG) + + # ========================================================================== + # INDEXED PARAMETERS + # ========================================================================== + + # --- patch_icpp (10 patches) --- + for i in range(1, NP + 1): + px = f"patch_icpp({i})%" + for a in ["geometry", "smooth_patch_id", "hcid", "model_spc"]: + _r(f"{px}{a}", INT) + for a in ["smoothen", "alter_patch"] if i >= 2 else ["smoothen"]: + _r(f"{px}{a}", LOG) + for a in ["radius", "radii", "epsilon", "beta", "normal", "alpha_rho", + "non_axis_sym", "smooth_coeff", "rho", "vel", "alpha", "gamma", + "pi_inf", "cv", "qv", "qvp", "model_threshold"]: + _r(f"{px}{a}", REAL) + # Bubble fields + for a in ["r0", "v0", "p0", "m0"]: + _r(f"{px}{a}", REAL, {"bubbles"}) + for j in range(2, 10): + _r(f"{px}a({j})", REAL) + _r(f"{px}pres", A_REAL) + _r(f"{px}cf_val", A_REAL) + # MHD fields + for a in ["Bx", "By", "Bz"]: + _r(f"{px}{a}", A_REAL, {"mhd"}) + # Chemistry species + for j in range(1, 101): + _r(f"{px}Y({j})", A_REAL, {"chemistry"}) + _r(f"{px}model_filepath", STR) + for t in ["translate", "scale", "rotate"]: + for j in range(1, 4): + _r(f"{px}model_{t}({j})", REAL) + for d in ["x", "y", "z"]: + _r(f"{px}{d}_centroid", REAL) + _r(f"{px}length_{d}", REAL) + for j in range(1, 4): + _r(f"{px}radii({j})", REAL) + _r(f"{px}normal({j})", REAL) + _r(f"{px}vel({j})", 
A_REAL) + for f in range(1, NF + 1): + _r(f"{px}alpha({f})", A_REAL) + _r(f"{px}alpha_rho({f})", A_REAL) + # Elasticity stress tensor + for j in range(1, 7): + _r(f"{px}tau_e({j})", A_REAL, {"elasticity"}) + if i >= 2: + for j in range(1, i): + _r(f"{px}alter_patch({j})", LOG) + + # --- fluid_pp (10 fluids) --- + for f in range(1, NF + 1): + px = f"fluid_pp({f})%" + for a in ["gamma", "pi_inf", "cv", "qv", "qvp"]: + _r(f"{px}{a}", REAL) + _r(f"{px}mul0", REAL, {"viscosity"}) + _r(f"{px}ss", REAL, {"surface_tension"}) + for a in ["pv", "gamma_v", "M_v", "mu_v", "k_v", "cp_v", "D_v"]: + _r(f"{px}{a}", REAL, {"bubbles"}) + _r(f"{px}G", REAL, {"elasticity"}) + for j in [1, 2]: + _r(f"{px}Re({j})", REAL, {"viscosity"}) + + # --- bub_pp (bubble properties) --- + for a in ["R0ref", "p0ref", "rho0ref", "T0ref", "ss", "pv", "vd", + "mu_l", "mu_v", "mu_g", "gam_v", "gam_g", + "M_v", "M_g", "k_v", "k_g", "cp_v", "cp_g", "R_v", "R_g"]: + _r(f"bub_pp%{a}", REAL, {"bubbles"}) + + # --- patch_ib (10 immersed boundaries) --- + for i in range(1, NI + 1): + px = f"patch_ib({i})%" + for a in ["geometry", "moving_ibm"]: + _r(f"{px}{a}", INT, {"ib"}) + for a, pt in [("radius", REAL), ("theta", REAL), ("slip", LOG), ("c", REAL), + ("p", REAL), ("t", REAL), ("m", REAL), ("mass", REAL)]: + _r(f"{px}{a}", pt, {"ib"}) + for j in range(1, 4): + _r(f"{px}angles({j})", REAL, {"ib"}) + for d in ["x", "y", "z"]: + _r(f"{px}{d}_centroid", REAL, {"ib"}) + _r(f"{px}length_{d}", REAL, {"ib"}) + for a, pt in [("model_filepath", STR), ("model_spc", INT), ("model_threshold", REAL)]: + _r(f"{px}{a}", pt, {"ib"}) + for t in ["translate", "scale", "rotate"]: + for j in range(1, 4): + _r(f"{px}model_{t}({j})", REAL, {"ib"}) + for j in range(1, 4): + _r(f"{px}vel({j})", REAL, {"ib"}) + _r(f"{px}angular_vel({j})", REAL, {"ib"}) + + # --- acoustic sources (4 sources) --- + for i in range(1, NA + 1): + px = f"acoustic({i})%" + for a in ["pulse", "support", "num_elements", "element_on", "bb_num_freq"]: + _r(f"{px}{a}", INT, {"acoustic"}) + _r(f"{px}dipole", LOG, {"acoustic"}) + for a in ["mag", "length", "height", "wavelength", "frequency", + "gauss_sigma_dist", "gauss_sigma_time", "npulse", + "dir", "delay", "foc_length", "aperture", + "element_spacing_angle", "element_polygon_ratio", + "rotate_angle", "bb_bandwidth", "bb_lowest_freq"]: + _r(f"{px}{a}", REAL, {"acoustic"}) + for j in range(1, 4): + _r(f"{px}loc({j})", REAL, {"acoustic"}) + + # --- probes (10 probes) --- + for i in range(1, NPR + 1): + for d in ["x", "y", "z"]: + _r(f"probe({i})%{d}", REAL, {"probes"}) + + # --- integrals (5 integral regions) --- + for i in range(1, 6): + for d in ["x", "y", "z"]: + _r(f"integral({i})%{d}min", REAL, {"probes"}) + _r(f"integral({i})%{d}max", REAL, {"probes"}) + + # --- Extended BC --- + for d in ["x", "y", "z"]: + px = f"bc_{d}%" + for a in ["vb1", "vb2", "vb3", "ve1", "ve2", "ve3", "pres_in", "pres_out"]: + _r(f"{px}{a}", REAL, {"bc"}) + for a in ["grcbc_in", "grcbc_out", "grcbc_vel_out"]: + _r(f"{px}{a}", LOG, {"bc"}) + for f in range(1, NF + 1): + _r(f"{px}alpha_rho_in({f})", REAL, {"bc"}) + _r(f"{px}alpha_in({f})", REAL, {"bc"}) + for j in range(1, 4): + _r(f"{px}vel_in({j})", REAL, {"bc"}) + _r(f"{px}vel_out({j})", REAL, {"bc"}) + + # --- patch_bc (10 BC patches) --- + for i in range(1, NB + 1): + px = f"patch_bc({i})%" + for a in ["geometry", "type", "dir", "loc"]: + _r(f"{px}{a}", INT, {"bc"}) + for j in range(1, 4): + _r(f"{px}centroid({j})", REAL, {"bc"}) + _r(f"{px}length({j})", REAL, {"bc"}) + _r(f"{px}radius", REAL, {"bc"}) 
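+
+    # For reference, one registration above, fully expanded:
+    # _r("weno_order", INT, {"weno"}) registered "weno_order" with its table
+    # description, the {"choices": [0, 1, 3, 5, 7]} constraint, and
+    # case_optimization=True (it appears in CASE_OPT_PARAMS).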
+ + # --- simplex_params --- + for f in range(1, NF + 1): + _r(f"simplex_params%perturb_dens({f})", LOG) + _r(f"simplex_params%perturb_dens_freq({f})", REAL) + _r(f"simplex_params%perturb_dens_scale({f})", REAL) + for j in range(1, 4): + _r(f"simplex_params%perturb_dens_offset({f}, {j})", REAL) + for d in range(1, 4): + _r(f"simplex_params%perturb_vel({d})", LOG) + _r(f"simplex_params%perturb_vel_freq({d})", REAL) + _r(f"simplex_params%perturb_vel_scale({d})", REAL) + for j in range(1, 4): + _r(f"simplex_params%perturb_vel_offset({d},{j})", REAL) + + # --- lag_params (Lagrangian bubbles) --- + for a in ["heatTransfer_model", "massTransfer_model", "pressure_corrector", + "write_bubbles", "write_bubbles_stats"]: + _r(f"lag_params%{a}", LOG, {"bubbles"}) + for a in ["solver_approach", "cluster_type", "smooth_type", "nBubs_glb"]: + _r(f"lag_params%{a}", INT, {"bubbles"}) + for a in ["epsilonb", "valmaxvoid", "charwidth", "c0", "rho0", "T0", "x0", "Thost"]: + _r(f"lag_params%{a}", REAL, {"bubbles"}) + + # --- chem_params --- + for a in ["diffusion", "reactions"]: + _r(f"chem_params%{a}", LOG, {"chemistry"}) + for a in ["gamma_method", "transport_model"]: + _r(f"chem_params%{a}", INT, {"chemistry"}) + + # --- Per-fluid output arrays --- + for f in range(1, NF + 1): + _r(f"schlieren_alpha({f})", REAL, {"output"}) + for a in ["alpha_rho_wrt", "alpha_wrt", "kappa_wrt", "alpha_rho_e_wrt"]: + _r(f"{a}({f})", LOG, {"output"}) + for j in range(1, 4): + for a in ["mom_wrt", "vel_wrt", "flux_wrt", "omega_wrt"]: + _r(f"{a}({j})", LOG, {"output"}) + + # --- chem_wrt (chemistry output) --- + for j in range(1, 101): + _r(f"chem_wrt_Y({j})", LOG, {"chemistry", "output"}) + _r("chem_wrt_T", LOG, {"chemistry", "output"}) + + # --- fluid_rho --- + for f in range(1, NF + 1): + _r(f"fluid_rho({f})", REAL) + + +# Load definitions when module imported and freeze registry +def _init_registry(): + """Initialize and freeze the registry. Called once at module import.""" + try: + # Validate constraint and dependency schemas first + # This catches typos like "choises" instead of "choices" + _validate_all_constraints(CONSTRAINTS) + _validate_all_dependencies(DEPENDENCIES) + + # Load all parameter definitions + _load() + + # Freeze registry to prevent further modifications + REGISTRY.freeze() + except Exception as e: + # Re-raise with context to help debugging initialization failures + raise RuntimeError( + f"Failed to initialize parameter registry: {e}\n" + "This is likely a bug in the parameter definitions." + ) from e + +_init_registry() diff --git a/toolchain/mfc/params/descriptions.py b/toolchain/mfc/params/descriptions.py new file mode 100644 index 0000000000..03fbc60ba6 --- /dev/null +++ b/toolchain/mfc/params/descriptions.py @@ -0,0 +1,657 @@ +""" +Parameter Descriptions. + +Manual descriptions for simple parameters + pattern-based auto-generation for indexed params. +Descriptions extracted from docs/documentation/case.md where available. 
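+
+For example, DESCRIPTIONS["m"] holds "Number of grid cells in the x-direction",
+while an indexed name such as "probe(2)%x" has no manual entry and instead
+matches a PATTERNS rule, yielding "X-coordinate of probe 2".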
+""" + +import re + +# Manual descriptions for simple parameters (from docs/documentation/case.md) +DESCRIPTIONS = { + # Computational domain + "m": "Number of grid cells in the x-direction", + "n": "Number of grid cells in the y-direction", + "p": "Number of grid cells in the z-direction", + "x_domain%beg": "Beginning of the x-direction domain", + "x_domain%end": "End of the x-direction domain", + "y_domain%beg": "Beginning of the y-direction domain", + "y_domain%end": "End of the y-direction domain", + "z_domain%beg": "Beginning of the z-direction domain", + "z_domain%end": "End of the z-direction domain", + "cyl_coord": "Enable cylindrical coordinates (2D: axisymmetric, 3D: cylindrical)", + "stretch_x": "Enable grid stretching in the x-direction", + "stretch_y": "Enable grid stretching in the y-direction", + "stretch_z": "Enable grid stretching in the z-direction", + "a_x": "Rate of grid stretching in the x-direction", + "a_y": "Rate of grid stretching in the y-direction", + "a_z": "Rate of grid stretching in the z-direction", + "x_a": "Start of stretching in negative x-direction", + "x_b": "Start of stretching in positive x-direction", + "y_a": "Start of stretching in negative y-direction", + "y_b": "Start of stretching in positive y-direction", + "z_a": "Start of stretching in negative z-direction", + "z_b": "Start of stretching in positive z-direction", + "loops_x": "Number of times to apply grid stretching in x", + "loops_y": "Number of times to apply grid stretching in y", + "loops_z": "Number of times to apply grid stretching in z", + + # Time stepping + "dt": "Time step size", + "t_step_start": "Starting time step index", + "t_step_stop": "Ending time step index", + "t_step_save": "Time step interval for saving data", + "t_step_print": "Time step interval for printing info", + "t_stop": "Simulation stop time", + "t_save": "Time interval for saving data", + "time_stepper": "Time integration scheme (1=Euler, 2=TVD-RK2, 3=TVD-RK3)", + "cfl_adap_dt": "Enable adaptive time stepping based on CFL", + "cfl_const_dt": "Use constant CFL for time stepping", + "cfl_target": "Target CFL number for adaptive time stepping", + "cfl_max": "Maximum allowed CFL number", + "adap_dt": "Enable adaptive time stepping", + "adap_dt_tol": "Tolerance for adaptive time stepping", + "adap_dt_max_iters": "Maximum iterations for adaptive time stepping", + + # Model equations + "model_eqns": "Model equations (1=gamma-law, 2=5-eq, 3=6-eq, 4=4-eq)", + "num_fluids": "Number of fluid components", + "num_patches": "Number of initial condition patches", + "mpp_lim": "Enable mixture pressure positivity limiter", + "mixture_err": "Enable mixture error checking", + "alt_soundspeed": "Use alternative sound speed formulation", + + # WENO reconstruction + "weno_order": "Order of WENO reconstruction (1, 3, 5, or 7)", + "weno_eps": "WENO epsilon parameter for smoothness", + "mapped_weno": "Enable mapped WENO scheme", + "wenoz": "Enable WENO-Z scheme", + "wenoz_q": "WENO-Z power parameter", + "teno": "Enable TENO scheme", + "teno_CT": "TENO cutoff parameter", + "mp_weno": "Enable monotonicity-preserving WENO", + "weno_Re_flux": "Enable WENO for viscous fluxes", + "weno_avg": "Enable WENO averaging", + "null_weights": "Allow null WENO weights", + + # MUSCL reconstruction + "recon_type": "Reconstruction type (1=WENO, 2=MUSCL)", + "muscl_order": "Order of MUSCL reconstruction", + "muscl_lim": "MUSCL limiter type", + + # Riemann solver + "riemann_solver": "Riemann solver (1=HLL, 2=HLLC, 3=exact)", + "wave_speeds": "Wave speed 
estimates (1=direct, 2=pressure)", + "avg_state": "Average state for Riemann solver (1=Roe, 2=arithmetic)", + "low_Mach": "Low Mach number correction", + + # Boundary conditions + "bc_x%beg": "Boundary condition at x-begin (-1=periodic, -2=reflective, -3=symmetric, etc.)", + "bc_x%end": "Boundary condition at x-end", + "bc_y%beg": "Boundary condition at y-begin", + "bc_y%end": "Boundary condition at y-end", + "bc_z%beg": "Boundary condition at z-begin", + "bc_z%end": "Boundary condition at z-end", + "num_bc_patches": "Number of boundary condition patches", + + # Physics models + "bubbles_euler": "Enable Euler-Euler bubble model", + "bubbles_lagrange": "Enable Lagrangian bubble tracking", + "bubble_model": "Bubble dynamics model (1=Gilmore, 2=Keller-Miksis, 3=Rayleigh-Plesset)", + "polytropic": "Enable polytropic gas behavior for bubbles", + "polydisperse": "Enable polydisperse bubble distribution", + "nb": "Number of bubble bins for polydisperse model", + "qbmm": "Enable quadrature-based moment method", + "R0ref": "Reference bubble radius", + "Ca": "Cavitation number", + "Web": "Weber number", + "Re_inv": "Inverse Reynolds number", + "viscous": "Enable viscous effects", + "hypoelasticity": "Enable hypoelastic model", + "hyperelasticity": "Enable hyperelastic model", + "surface_tension": "Enable surface tension effects", + "chemistry": "Enable chemical reactions", + "mhd": "Enable magnetohydrodynamics", + "hyper_cleaning": "Enable hyperbolic divergence cleaning for MHD", + "hyper_cleaning_speed": "Wave speed for hyperbolic divergence cleaning", + "hyper_cleaning_tau": "Damping time constant for hyperbolic divergence cleaning", + "relativity": "Enable special relativity", + + # Output + "run_time_info": "Output run-time information", + "prim_vars_wrt": "Write primitive variables", + "cons_vars_wrt": "Write conservative variables", + "probe_wrt": "Write probe data", + "integral_wrt": "Write integral data", + "parallel_io": "Enable parallel I/O", + "file_per_process": "Write separate file per MPI process", + "format": "Output format (1=Silo, 2=binary)", + "precision": "Output precision (1=single, 2=double)", + "schlieren_wrt": "Write schlieren images", + "rho_wrt": "Write density field", + "pres_wrt": "Write pressure field", + "vel_wrt": "Write velocity field", + "E_wrt": "Write energy field", + "gamma_wrt": "Write gamma field", + "alpha_wrt": "Write volume fraction field", + "alpha_rho_wrt": "Write partial density field", + "c_wrt": "Write sound speed field", + "omega_wrt": "Write vorticity field", + "cf_wrt": "Write color function field", + + # Immersed boundaries + "ib": "Enable immersed boundary method", + "num_ibs": "Number of immersed boundary patches", + + # Acoustic sources + "acoustic_source": "Enable acoustic source terms", + "num_source": "Number of acoustic sources", + + # Probes and integrals + "num_probes": "Number of probe points", + "num_integrals": "Number of integral regions", + + # MPI/GPU + "rdma_mpi": "Enable RDMA for MPI communication (GPUs)", + + # Misc + "case_dir": "Case directory path", + "cantera_file": "Cantera mechanism file for chemistry", + "old_grid": "Use grid from previous simulation", + "old_ic": "Use initial conditions from previous simulation", + "t_step_old": "Time step to restart from", + "fd_order": "Finite difference order for gradients", + + # Additional simple params + "thermal": "Thermal model selection", + "relax_model": "Relaxation model type", + "igr_order": "Implicit gradient reconstruction order", + "pref": "Reference pressure", + "poly_sigma": 
"Polydisperse distribution standard deviation", + "rhoref": "Reference density", + "sigma": "Surface tension coefficient", + "Bx0": "Background magnetic field in x-direction", + "relax": "Enable relaxation terms", + "adv_n": "Enable advection of number density", + "cont_damage": "Enable continuum damage model", + "igr": "Enable implicit gradient reconstruction", + "down_sample": "Enable output downsampling", + "perturb_flow_fluid": "Fluid index for flow perturbation", + "perturb_sph_fluid": "Fluid index for spherical perturbation", + "dist_type": "Distribution type for polydisperse bubbles", + "mixlayer_perturb_nk": "Number of perturbation modes for mixing layer", + "elliptic_smoothing_iters": "Number of elliptic smoothing iterations", + "mixlayer_vel_coef": "Velocity coefficient for mixing layer", + "mixlayer_domain": "Mixing layer domain size", + "mixlayer_perturb_k0": "Base wavenumber for mixing layer perturbation", + "perturb_flow_mag": "Magnitude of flow perturbation", + "fluid_rho": "Reference fluid density", + "sigR": "Bubble radius standard deviation", + "sigV": "Bubble velocity standard deviation", + "rhoRV": "Bubble radius-velocity correlation", + "mixlayer_vel_profile": "Enable mixing layer velocity profile", + "mixlayer_perturb": "Enable mixing layer perturbation", + "perturb_flow": "Enable flow perturbation", + "perturb_sph": "Enable spherical perturbation", + "cfl_dt": "Enable CFL-based time stepping", + "pre_stress": "Enable pre-stress initialization", + "elliptic_smoothing": "Enable elliptic smoothing", + "simplex_perturb": "Enable simplex noise perturbation", + "n_start_old": "Starting index from previous simulation", + "palpha_eps": "Volume fraction epsilon for pressure relaxation", + "ptgalpha_eps": "Volume fraction epsilon for PTG relaxation", + "pi_fac": "Pi infinity factor", + "n_start": "Starting time step index", + "tau_star": "Non-dimensional relaxation time", + "cont_damage_s": "Continuum damage shape parameter", + "alpha_bar": "Average volume fraction", + "alf_factor": "Artificial viscosity factor", + "ic_eps": "Interface compression epsilon", + "ic_beta": "Interface compression beta", + "powell": "Enable Powell source terms for MHD", + "igr_pres_lim": "Enable IGR pressure limiting", + "int_comp": "Enable interface compression", + "nv_uvm_out_of_core": "Enable NVIDIA UVM out-of-core", + "nv_uvm_pref_gpu": "Prefer GPU for NVIDIA UVM", + "nv_uvm_igr_temps_on_gpu": "Store IGR temporaries on GPU", + "num_igr_iters": "Number of IGR iterations", + "num_igr_warm_start_iters": "Number of IGR warm-start iterations", + "igr_iter_solver": "IGR iterative solver type", + "schlieren_alpha": "Schlieren alpha coefficient", + "t_tol": "Time tolerance", + "flux_lim": "Flux limiter type", + "heat_ratio_wrt": "Write heat capacity ratio field", + "pi_inf_wrt": "Write pi_inf field", + "pres_inf_wrt": "Write reference pressure field", + "qm_wrt": "Write Q-criterion field", + "liutex_wrt": "Write Liutex vortex field", + "sim_data": "Enable simulation data output", + "output_partial_domain": "Enable partial domain output", + "fft_wrt": "Enable FFT output", + "kappa_wrt": "Write curvature field", + "lag_header": "Enable Lagrangian output header", + "chem_wrt_T": "Write temperature field for chemistry", +} + +# Patterns for auto-generating descriptions of indexed parameters +PATTERNS = [ + # patch_icpp patterns + (r"patch_icpp\((\d+)\)%geometry", "Geometry type for initial condition patch {0}"), + (r"patch_icpp\((\d+)\)%x_centroid", "X-coordinate of centroid for patch {0}"), + 
(r"patch_icpp\((\d+)\)%y_centroid", "Y-coordinate of centroid for patch {0}"), + (r"patch_icpp\((\d+)\)%z_centroid", "Z-coordinate of centroid for patch {0}"), + (r"patch_icpp\((\d+)\)%length_x", "X-dimension length for patch {0}"), + (r"patch_icpp\((\d+)\)%length_y", "Y-dimension length for patch {0}"), + (r"patch_icpp\((\d+)\)%length_z", "Z-dimension length for patch {0}"), + (r"patch_icpp\((\d+)\)%radius", "Radius for patch {0}"), + (r"patch_icpp\((\d+)\)%radii\((\d+)\)", "Radius component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%normal\((\d+)\)", "Normal component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%vel\((\d+)\)", "Velocity component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%alpha\((\d+)\)", "Volume fraction of fluid {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%alpha_rho\((\d+)\)", "Partial density of fluid {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%pres", "Pressure for patch {0}"), + (r"patch_icpp\((\d+)\)%rho", "Density for patch {0}"), + (r"patch_icpp\((\d+)\)%gamma", "Specific heat ratio for patch {0}"), + (r"patch_icpp\((\d+)\)%pi_inf", "Stiffness pressure for patch {0}"), + (r"patch_icpp\((\d+)\)%smoothen", "Enable smoothing for patch {0}"), + (r"patch_icpp\((\d+)\)%smooth_patch_id", "Patch ID to smooth against for patch {0}"), + (r"patch_icpp\((\d+)\)%smooth_coeff", "Smoothing coefficient for patch {0}"), + (r"patch_icpp\((\d+)\)%alter_patch\((\d+)\)", "Alter patch {1} with patch {0}"), + (r"patch_icpp\((\d+)\)%alter_patch", "Enable patch alteration for patch {0}"), + (r"patch_icpp\((\d+)\)%Y\((\d+)\)", "Mass fraction of species {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%tau_e\((\d+)\)", "Elastic stress component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%Bx", "X-component of magnetic field for patch {0}"), + (r"patch_icpp\((\d+)\)%By", "Y-component of magnetic field for patch {0}"), + (r"patch_icpp\((\d+)\)%Bz", "Z-component of magnetic field for patch {0}"), + (r"patch_icpp\((\d+)\)%model_filepath", "STL model file path for patch {0}"), + (r"patch_icpp\((\d+)\)%model_translate\((\d+)\)", "Model translation component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%model_scale\((\d+)\)", "Model scale component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%model_rotate\((\d+)\)", "Model rotation component {1} for patch {0}"), + (r"patch_icpp\((\d+)\)%model_threshold", "Model threshold for patch {0}"), + (r"patch_icpp\((\d+)\)%epsilon", "Interface thickness for patch {0}"), + (r"patch_icpp\((\d+)\)%beta", "Shape parameter beta for patch {0}"), + (r"patch_icpp\((\d+)\)%a\((\d+)\)", "Shape coefficient a({1}) for patch {0}"), + (r"patch_icpp\((\d+)\)%cf_val", "Color function value for patch {0}"), + (r"patch_icpp\((\d+)\)%cv", "Specific heat at constant volume for patch {0}"), + (r"patch_icpp\((\d+)\)%qv", "Heat of formation for patch {0}"), + (r"patch_icpp\((\d+)\)%qvp", "Heat of formation prime for patch {0}"), + (r"patch_icpp\((\d+)\)%hcid", "Hard-coded patch ID for patch {0}"), + (r"patch_icpp\((\d+)\)%model_spc", "Model spacing for patch {0}"), + (r"patch_icpp\((\d+)\)%non_axis_sym", "Non-axisymmetric parameter for patch {0}"), + (r"patch_icpp\((\d+)\)%r0", "Initial bubble radius for patch {0}"), + (r"patch_icpp\((\d+)\)%v0", "Initial bubble velocity for patch {0}"), + (r"patch_icpp\((\d+)\)%p0", "Initial bubble pressure for patch {0}"), + (r"patch_icpp\((\d+)\)%m0", "Initial bubble mass for patch {0}"), + (r"patch_icpp\((\d+)\)%vel", "Velocity magnitude for patch {0}"), + (r"patch_icpp\((\d+)\)%alpha", "Volume fraction for patch {0}"), + 
(r"patch_icpp\((\d+)\)%alpha_rho", "Partial density for patch {0}"), + (r"patch_icpp\((\d+)\)%radii", "Radii for patch {0}"), + (r"patch_icpp\((\d+)\)%normal", "Normal direction for patch {0}"), + + # fluid_pp patterns + (r"fluid_pp\((\d+)\)%gamma", "Specific heat ratio for fluid {0}"), + (r"fluid_pp\((\d+)\)%pi_inf", "Stiffness pressure for fluid {0}"), + (r"fluid_pp\((\d+)\)%G", "Shear modulus for fluid {0}"), + (r"fluid_pp\((\d+)\)%cv", "Specific heat at constant volume for fluid {0}"), + (r"fluid_pp\((\d+)\)%qv", "Heat of formation for fluid {0}"), + (r"fluid_pp\((\d+)\)%qvp", "Heat of formation prime for fluid {0}"), + (r"fluid_pp\((\d+)\)%Re\((\d+)\)", "Reynolds number component {1} for fluid {0}"), + (r"fluid_pp\((\d+)\)%mul0", "Reference liquid viscosity for fluid {0}"), + (r"fluid_pp\((\d+)\)%ss", "Surface tension for fluid {0}"), + (r"fluid_pp\((\d+)\)%pv", "Vapor pressure for fluid {0}"), + (r"fluid_pp\((\d+)\)%gamma_v", "Specific heat ratio of vapor phase for fluid {0}"), + (r"fluid_pp\((\d+)\)%M_v", "Molecular weight of vapor phase for fluid {0}"), + (r"fluid_pp\((\d+)\)%mu_v", "Viscosity of vapor phase for fluid {0}"), + (r"fluid_pp\((\d+)\)%k_v", "Thermal conductivity of vapor phase for fluid {0}"), + (r"fluid_pp\((\d+)\)%cp_v", "Specific heat capacity (const. pressure) of vapor for fluid {0}"), + (r"fluid_pp\((\d+)\)%D_v", "Vapor mass diffusivity for fluid {0}"), + + # patch_ib patterns + (r"patch_ib\((\d+)\)%geometry", "Geometry type for immersed boundary {0}"), + (r"patch_ib\((\d+)\)%x_centroid", "X-coordinate of centroid for IB patch {0}"), + (r"patch_ib\((\d+)\)%y_centroid", "Y-coordinate of centroid for IB patch {0}"), + (r"patch_ib\((\d+)\)%z_centroid", "Z-coordinate of centroid for IB patch {0}"), + (r"patch_ib\((\d+)\)%length_x", "X-dimension length for IB patch {0}"), + (r"patch_ib\((\d+)\)%length_y", "Y-dimension length for IB patch {0}"), + (r"patch_ib\((\d+)\)%length_z", "Z-dimension length for IB patch {0}"), + (r"patch_ib\((\d+)\)%radius", "Radius for IB patch {0}"), + (r"patch_ib\((\d+)\)%theta", "Theta angle for IB patch {0}"), + (r"patch_ib\((\d+)\)%c", "Shape parameter c for IB patch {0}"), + (r"patch_ib\((\d+)\)%p", "Shape parameter p for IB patch {0}"), + (r"patch_ib\((\d+)\)%t", "Shape parameter t for IB patch {0}"), + (r"patch_ib\((\d+)\)%m", "Shape parameter m for IB patch {0}"), + (r"patch_ib\((\d+)\)%mass", "Mass for IB patch {0}"), + (r"patch_ib\((\d+)\)%vel\((\d+)\)", "Velocity component {1} for IB patch {0}"), + (r"patch_ib\((\d+)\)%angular_vel\((\d+)\)", "Angular velocity component {1} for IB patch {0}"), + (r"patch_ib\((\d+)\)%angles\((\d+)\)", "Orientation angle {1} for IB patch {0}"), + (r"patch_ib\((\d+)\)%slip", "Enable slip condition for IB patch {0}"), + (r"patch_ib\((\d+)\)%moving_ibm", "Enable moving boundary for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_filepath", "STL model file path for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_spc", "Model spacing for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_threshold", "Model threshold for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_translate\((\d+)\)", "Model translation component {1} for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_scale\((\d+)\)", "Model scale component {1} for IB patch {0}"), + (r"patch_ib\((\d+)\)%model_rotate\((\d+)\)", "Model rotation component {1} for IB patch {0}"), + + # bc patterns + (r"bc_([xyz])%vel_in\((\d+)\)", "Inlet velocity component {1} at {0}-boundary"), + (r"bc_([xyz])%vel_out\((\d+)\)", "Outlet velocity component {1} at {0}-boundary"), + 
(r"bc_([xyz])%alpha_rho_in\((\d+)\)", "Inlet partial density of fluid {1} at {0}-boundary"), + (r"bc_([xyz])%alpha_in\((\d+)\)", "Inlet volume fraction of fluid {1} at {0}-boundary"), + (r"bc_([xyz])%pres_in", "Inlet pressure at {0}-boundary"), + (r"bc_([xyz])%pres_out", "Outlet pressure at {0}-boundary"), + (r"bc_([xyz])%vb(\d+)", "Boundary velocity component {1} at {0}-begin"), + (r"bc_([xyz])%ve(\d+)", "Boundary velocity component {1} at {0}-end"), + (r"bc_([xyz])%grcbc_in", "Enable GRCBC at {0}-inlet"), + (r"bc_([xyz])%grcbc_out", "Enable GRCBC at {0}-outlet"), + (r"bc_([xyz])%grcbc_vel_out", "Enable GRCBC velocity at {0}-outlet"), + + # patch_bc patterns + (r"patch_bc\((\d+)\)%geometry", "Geometry type for BC patch {0}"), + (r"patch_bc\((\d+)\)%type", "BC type for patch {0}"), + (r"patch_bc\((\d+)\)%dir", "Direction for BC patch {0}"), + (r"patch_bc\((\d+)\)%loc", "Location for BC patch {0}"), + (r"patch_bc\((\d+)\)%centroid\((\d+)\)", "Centroid component {1} for BC patch {0}"), + (r"patch_bc\((\d+)\)%length\((\d+)\)", "Length component {1} for BC patch {0}"), + (r"patch_bc\((\d+)\)%radius", "Radius for BC patch {0}"), + + # acoustic patterns + (r"acoustic\((\d+)\)%loc\((\d+)\)", "Location component {1} for acoustic source {0}"), + (r"acoustic\((\d+)\)%mag", "Magnitude for acoustic source {0}"), + (r"acoustic\((\d+)\)%pulse", "Pulse type for acoustic source {0}"), + (r"acoustic\((\d+)\)%support", "Support type for acoustic source {0}"), + (r"acoustic\((\d+)\)%frequency", "Frequency for acoustic source {0}"), + (r"acoustic\((\d+)\)%wavelength", "Wavelength for acoustic source {0}"), + (r"acoustic\((\d+)\)%length", "Length for acoustic source {0}"), + (r"acoustic\((\d+)\)%height", "Height for acoustic source {0}"), + (r"acoustic\((\d+)\)%delay", "Delay for acoustic source {0}"), + (r"acoustic\((\d+)\)%dipole", "Enable dipole for acoustic source {0}"), + (r"acoustic\((\d+)\)%dir", "Direction for acoustic source {0}"), + (r"acoustic\((\d+)\)%npulse", "Number of pulses for acoustic source {0}"), + (r"acoustic\((\d+)\)%gauss_sigma_dist", "Gaussian spatial width for acoustic source {0}"), + (r"acoustic\((\d+)\)%gauss_sigma_time", "Gaussian temporal width for acoustic source {0}"), + (r"acoustic\((\d+)\)%num_elements", "Number of array elements for acoustic source {0}"), + (r"acoustic\((\d+)\)%element_on", "Active element index for acoustic source {0}"), + (r"acoustic\((\d+)\)%element_spacing_angle", "Element spacing angle for acoustic source {0}"), + (r"acoustic\((\d+)\)%element_polygon_ratio", "Element polygon ratio for acoustic source {0}"), + (r"acoustic\((\d+)\)%foc_length", "Focal length for acoustic source {0}"), + (r"acoustic\((\d+)\)%aperture", "Aperture for acoustic source {0}"), + (r"acoustic\((\d+)\)%rotate_angle", "Rotation angle for acoustic source {0}"), + (r"acoustic\((\d+)\)%bb_num_freq", "Number of broadband frequencies for source {0}"), + (r"acoustic\((\d+)\)%bb_bandwidth", "Broadband bandwidth for acoustic source {0}"), + (r"acoustic\((\d+)\)%bb_lowest_freq", "Lowest broadband frequency for source {0}"), + + # probe patterns + (r"probe\((\d+)\)%x", "X-coordinate of probe {0}"), + (r"probe\((\d+)\)%y", "Y-coordinate of probe {0}"), + (r"probe\((\d+)\)%z", "Z-coordinate of probe {0}"), + + # integral patterns + (r"integral\((\d+)\)%xmin", "X-min of integral region {0}"), + (r"integral\((\d+)\)%xmax", "X-max of integral region {0}"), + (r"integral\((\d+)\)%ymin", "Y-min of integral region {0}"), + (r"integral\((\d+)\)%ymax", "Y-max of integral region {0}"), + 
(r"integral\((\d+)\)%zmin", "Z-min of integral region {0}"), + (r"integral\((\d+)\)%zmax", "Z-max of integral region {0}"), + + # bub_pp patterns + (r"bub_pp%R0ref", "Reference bubble radius"), + (r"bub_pp%p0ref", "Reference pressure for bubbles"), + (r"bub_pp%rho0ref", "Reference density for bubbles"), + (r"bub_pp%T0ref", "Reference temperature for bubbles"), + (r"bub_pp%ss", "Surface tension between host and gas (bubble)"), + (r"bub_pp%pv", "Vapor pressure of host fluid"), + (r"bub_pp%vd", "Vapor diffusion coefficient"), + (r"bub_pp%mu_l", "Viscosity of host in liquid state"), + (r"bub_pp%mu_v", "Viscosity of host in vapor state"), + (r"bub_pp%mu_g", "Viscosity of gas (bubble)"), + (r"bub_pp%gam_v", "Specific heat ratio of host in vapor state"), + (r"bub_pp%gam_g", "Specific heat ratio of gas (bubble)"), + (r"bub_pp%M_v", "Molecular weight of host vapor"), + (r"bub_pp%M_g", "Molecular weight of gas (bubble)"), + (r"bub_pp%k_v", "Thermal conductivity of host in vapor state"), + (r"bub_pp%k_g", "Thermal conductivity of gas (bubble)"), + (r"bub_pp%cp_v", "Specific heat (const. pressure) of host vapor"), + (r"bub_pp%cp_g", "Specific heat (const. pressure) of gas (bubble)"), + (r"bub_pp%R_v", "Gas constant of host in vapor state"), + (r"bub_pp%R_g", "Gas constant of gas (bubble)"), + (r"bub_pp%(\w+)", "Bubble parameter: {0}"), + + # Output array patterns + (r"schlieren_alpha\((\d+)\)", "Schlieren coefficient for fluid {0}"), + (r"alpha_rho_wrt\((\d+)\)", "Write partial density for fluid {0}"), + (r"alpha_wrt\((\d+)\)", "Write volume fraction for fluid {0}"), + (r"alpha_rho_e_wrt\((\d+)\)", "Write partial energy for fluid {0}"), + (r"kappa_wrt\((\d+)\)", "Write curvature for fluid {0}"), + (r"mom_wrt\((\d+)\)", "Write momentum component {0}"), + (r"vel_wrt\((\d+)\)", "Write velocity component {0}"), + (r"flux_wrt\((\d+)\)", "Write flux component {0}"), + (r"omega_wrt\((\d+)\)", "Write vorticity component {0}"), + (r"chem_wrt_Y\((\d+)\)", "Write mass fraction of species {0}"), + + # Lagrangian output patterns - specific fields first + (r"lag_pos_wrt", "Write Lagrangian bubble position"), + (r"lag_pos_prev_wrt", "Write Lagrangian bubble previous position"), + (r"lag_vel_wrt", "Write Lagrangian bubble velocity"), + (r"lag_rvel_wrt", "Write Lagrangian bubble radial velocity"), + (r"lag_rad_wrt", "Write Lagrangian bubble radius"), + (r"lag_r0_wrt", "Write Lagrangian initial bubble radius"), + (r"lag_rmax_wrt", "Write Lagrangian max bubble radius"), + (r"lag_rmin_wrt", "Write Lagrangian min bubble radius"), + (r"lag_pres_wrt", "Write Lagrangian bubble pressure"), + (r"lag_mv_wrt", "Write Lagrangian vapor mass"), + (r"lag_mg_wrt", "Write Lagrangian gas mass"), + (r"lag_db_wrt", "Write Lagrangian bubble diameter"), + (r"lag_dphidt_wrt", "Write Lagrangian void fraction time derivative"), + (r"lag_betaT_wrt", "Write Lagrangian thermal beta coefficient"), + (r"lag_betaC_wrt", "Write Lagrangian concentration beta coefficient"), + (r"lag_id_wrt", "Write Lagrangian bubble ID"), + (r"lag_txt_wrt", "Write Lagrangian data to text files"), + (r"lag_(\w+)_wrt", "Write Lagrangian {0} field"), + + # Body force patterns + (r"([kgwp])_([xyz])", "Body force parameter {0} in {1}-direction"), + (r"bf_([xyz])", "Enable body force in {0}-direction"), + + # simplex patterns + (r"simplex_params%perturb_dens\((\d+)\)", "Enable density perturbation for fluid {0}"), + (r"simplex_params%perturb_dens_freq\((\d+)\)", "Density perturbation frequency for fluid {0}"), + (r"simplex_params%perturb_dens_scale\((\d+)\)", "Density 
perturbation scale for fluid {0}"), + (r"simplex_params%perturb_dens_offset\((\d+),\s*(\d+)\)", "Density perturbation offset ({1}) for fluid {0}"), + (r"simplex_params%perturb_vel\((\d+)\)", "Enable velocity perturbation for direction {0}"), + (r"simplex_params%perturb_vel_freq\((\d+)\)", "Velocity perturbation frequency for direction {0}"), + (r"simplex_params%perturb_vel_scale\((\d+)\)", "Velocity perturbation scale for direction {0}"), + (r"simplex_params%perturb_vel_offset\((\d+),(\d+)\)", "Velocity perturbation offset ({1}) for direction {0}"), + + # lag_params patterns - specific fields first + (r"lag_params%solver_approach", "Lagrangian solver approach (1=one-way, 2=two-way coupling)"), + (r"lag_params%cluster_type", "Cluster model for pressure at infinity"), + (r"lag_params%pressure_corrector", "Enable cell pressure correction for Lagrangian bubbles"), + (r"lag_params%smooth_type", "Smoothing function type (1=Gaussian, 2=Delta 3x3)"), + (r"lag_params%heatTransfer_model", "Enable heat transfer at bubble-liquid interface"), + (r"lag_params%massTransfer_model", "Enable mass transfer at bubble-liquid interface"), + (r"lag_params%write_bubbles", "Write bubble evolution data each time step"), + (r"lag_params%write_bubbles_stats", "Write max/min radius statistics for bubbles"), + (r"lag_params%nBubs_glb", "Global number of Lagrangian bubbles"), + (r"lag_params%epsilonb", "Standard deviation scaling for Gaussian smoothing"), + (r"lag_params%charwidth", "Domain virtual depth for 2D simulations"), + (r"lag_params%valmaxvoid", "Maximum permitted void fraction"), + (r"lag_params%T0", "Initial bubble temperature"), + (r"lag_params%Thost", "Host fluid temperature"), + (r"lag_params%c0", "Initial sound speed"), + (r"lag_params%rho0", "Initial density"), + (r"lag_params%x0", "Initial bubble position"), + (r"lag_params%(\w+)", "Lagrangian tracking parameter: {0}"), + + # chem_params patterns - specific fields first + (r"chem_params%diffusion", "Enable species diffusion for chemistry"), + (r"chem_params%reactions", "Enable chemical reactions"), + (r"chem_params%gamma_method", "Gamma calculation method (1=formulation, 2=cp/cv ratio)"), + (r"chem_params%transport_model", "Transport model selection for chemistry"), + (r"chem_params%(\w+)", "Chemistry parameter: {0}"), + + # fluid_rho patterns + (r"fluid_rho\((\d+)\)", "Reference density for fluid {0}"), +] + + +def get_description(param_name: str) -> str: + """Get description for a parameter from registry or fallback sources.""" + # Primary source: ParamDef.description from registry + from . 
import REGISTRY # pylint: disable=import-outside-toplevel + param = REGISTRY.all_params.get(param_name) + if param and param.description: + return param.description + + # Fallback 1: manual descriptions dict (legacy, will be removed) + if param_name in DESCRIPTIONS: + return DESCRIPTIONS[param_name] + + # Fallback 2: pattern matching for indexed params + for pattern, template in PATTERNS: + match = re.fullmatch(pattern, param_name) + if match: + return template.format(*match.groups()) + + # Fallback 3: naming convention inference + return _infer_from_naming(param_name) + + +def _infer_from_naming(param_name: str) -> str: # pylint: disable=too-many-return-statements,too-many-branches + """Infer description from naming conventions.""" + name = param_name + + # Handle nested params (e.g., simplex_params%perturb_dens_offset) + if "%" in name: + parts = name.split("%") + prefix = parts[0] + suffix = parts[1] + + # Extract prefix context + prefix_map = { + "simplex_params": "Simplex noise", + "lag_params": "Lagrangian tracking", + "chem_params": "Chemistry", + "bub_pp": "Bubble", + "x_output": "X-direction output", + "y_output": "Y-direction output", + "z_output": "Z-direction output", + } + + # Remove index from prefix if present + base_prefix = re.sub(r"\(\d+\)", "", prefix) + context = prefix_map.get(base_prefix, "") + + # Handle common suffix patterns + if suffix.endswith("_wrt"): + field = suffix[:-4].replace("_", " ") + return f"Write {field} output" + (f" ({context})" if context else "") + + if "offset" in suffix: + return "Offset parameter" + (f" for {context}" if context else "") + + if context: + # Clean up suffix for display + clean_suffix = re.sub(r"\(\d+\)", "", suffix).replace("_", " ") + return f"{context} {clean_suffix} parameter" + + # Handle *_wrt patterns (write flags) + if name.endswith("_wrt"): + field = name[:-4].replace("_", " ") + return f"Write {field} to output" + + # Handle num_* patterns + if name.startswith("num_"): + thing = name[4:].replace("_", " ") + return f"Number of {thing}" + + # Handle *_order patterns + if name.endswith("_order"): + thing = name[:-6].replace("_", " ") + return f"Order of {thing}" + + # Handle *_model patterns + if name.endswith("_model"): + thing = name[:-6].replace("_", " ") + return f"{thing.title()} model selection" + + # Handle *_tol patterns + if name.endswith("_tol"): + thing = name[:-4].replace("_", " ") + return f"Tolerance for {thing}" + + # Handle *_eps patterns + if name.endswith("_eps"): + thing = name[:-4].replace("_", " ") + return f"Epsilon parameter for {thing}" + + # Handle *_coef or *_coeff patterns + if name.endswith("_coef") or name.endswith("_coeff"): + thing = name.rsplit("_", 1)[0].replace("_", " ") + return f"Coefficient for {thing}" + + # Handle *_max / *_min patterns + if name.endswith("_max"): + thing = name[:-4].replace("_", " ") + return f"Maximum {thing}" + if name.endswith("_min"): + thing = name[:-4].replace("_", " ") + return f"Minimum {thing}" + + # Handle *%beg / *%end patterns + if name.endswith("%beg"): + thing = name[:-4].replace("_", " ").replace("%", " ") + return f"Beginning value for {thing}" + if name.endswith("%end"): + thing = name[:-4].replace("_", " ").replace("%", " ") + return f"End value for {thing}" + + return "" + + +def get_pattern_description(pattern_name: str) -> str: + """Get description for a collapsed pattern like patch_icpp(N)%geometry.""" + # Convert pattern back to example: patch_icpp(N)%geometry -> patch_icpp(1)%geometry + # Use different placeholder values so we can distinguish 
them later
+    example = pattern_name.replace("(N)", "(1)").replace("(M)", "(2)").replace("(K)", "(3)")
+    desc = get_description(example)
+
+    if desc:
+        # Replace specific index values with generic labels
+        # First, handle the secondary index (2 -> M)
+        desc = re.sub(r"(species|fluid|component|direction) 2", r"\1 M", desc)
+        # Then handle primary index (1 -> N)
+        desc = re.sub(r"(patch|fluid|IB patch|source|probe|region|species|direction|component) 1", r"\1 N", desc)
+        # Generic fallback for space-separated indices
+        desc = re.sub(r" 1([,\s]|$)", r" N\1", desc)
+        desc = re.sub(r" 2([,\s]|$)", r" M\1", desc)
+        desc = re.sub(r" 3([,\s]|$)", r" K\1", desc)
+        # Handle parenthesized indices (e.g., "a(2)" -> "a(M)")
+        desc = re.sub(r"\(1\)", "(N)", desc)
+        desc = re.sub(r"\(2\)", "(M)", desc)
+        desc = re.sub(r"\(3\)", "(K)", desc)
+
+    return desc
+
+
+# Feature group descriptions (for display purposes)
+# The actual parameter-to-tag mapping is in definitions.py (single source of truth)
+FEATURE_DESCRIPTIONS = {
+    "mhd": "Magnetohydrodynamics parameters",
+    "bubbles": "Bubble dynamics and cavitation",
+    "viscosity": "Viscous flow parameters",
+    "weno": "WENO reconstruction scheme",
+    "time": "Time stepping and integration",
+    "output": "Output and visualization",
+    "chemistry": "Chemical reactions and species transport",
+    "elasticity": "Elastic and hyperelastic materials",
+    "acoustic": "Acoustic sources and wave generation",
+    "ib": "Immersed boundary method",
+    "grid": "Computational grid and domain",
+    "bc": "Boundary conditions",
+    "riemann": "Riemann solver settings",
+    "probes": "Probe points and integral regions",
+    "surface_tension": "Surface tension and interface",
+    "relativity": "Special relativity",
+}
diff --git a/toolchain/mfc/params/errors.py b/toolchain/mfc/params/errors.py
new file mode 100644
index 0000000000..ed885f8393
--- /dev/null
+++ b/toolchain/mfc/params/errors.py
@@ -0,0 +1,230 @@
+"""
+Consistent Error Message Formatting for MFC Parameter Validation.
+
+Provides utility functions for creating consistent, user-friendly error
+messages across all validation systems (params/validate.py, case_validator.py,
+and JSON schema validation).
+
+Error Message Format
+--------------------
+All error messages follow this structure:
+- Parameter name in single quotes: 'param_name'
+- Clear description of the problem
+- Current value if relevant: got <value>
+- Expected value/range if relevant: expected <value>
+
+Examples:
+- "'weno_order' must be one of [1, 3, 5, 7], got 4"
+- "'bubbles_euler'=T requires 'nb' to be set"
+- "'m' must be >= 0, got -1"
+"""
+
+from typing import Any, List, Optional
+
+
+def format_param(name: str) -> str:
+    """Format a parameter name for error messages."""
+    return f"'{name}'"
+
+
+def format_value(value: Any) -> str:
+    """Format a value for error messages."""
+    if isinstance(value, str):
+        return f"'{value}'"
+    return str(value)
+
+
+def constraint_error(
+    param: str,
+    constraint_type: str,
+    expected: Any,
+    got: Any,
+) -> str:
+    """
+    Create a constraint violation error message.
+
+    Args:
+        param: Parameter name
+        constraint_type: Type of constraint ('choices', 'min', 'max')
+        expected: Expected constraint value
+        got: Actual value received
+
+    Returns:
+        Formatted error message.
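+
+    Example (illustrative):
+        constraint_error("weno_order", "choices", [1, 3, 5, 7], 4)
+        -> "'weno_order' must be one of [1, 3, 5, 7], got 4"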
+ """ + if constraint_type == "choices": + return f"{format_param(param)} must be one of {expected}, got {format_value(got)}" + if constraint_type == "min": + return f"{format_param(param)} must be >= {expected}, got {format_value(got)}" + if constraint_type == "max": + return f"{format_param(param)} must be <= {expected}, got {format_value(got)}" + return f"{format_param(param)} constraint '{constraint_type}' violated: expected {expected}, got {format_value(got)}" + + +def type_error(param: str, expected_type: str, got: Any) -> str: + """ + Create a type mismatch error message. + + Args: + param: Parameter name + expected_type: Expected type description + got: Actual value received + + Returns: + Formatted error message. + """ + return f"{format_param(param)} must be {expected_type}, got {format_value(got)}" + + +def dependency_error( + param: str, + required_param: str, + condition: Optional[str] = None, +) -> str: + """ + Create a missing dependency error message. + + Args: + param: Parameter that has the dependency + required_param: Parameter that is required + condition: Optional condition (e.g., "=T", "> 0") + + Returns: + Formatted error message. + """ + if condition: + return f"{format_param(param)}{condition} requires {format_param(required_param)} to be set" + return f"{format_param(param)} requires {format_param(required_param)} to be set" + + +def dependency_recommendation( + param: str, + recommended_param: str, + condition: Optional[str] = None, +) -> str: + """ + Create a dependency recommendation message. + + Args: + param: Parameter that has the recommendation + recommended_param: Parameter that is recommended + condition: Optional condition + + Returns: + Formatted recommendation message. + """ + if condition: + return f"{format_param(param)}{condition}: consider setting {format_param(recommended_param)}" + return f"When {format_param(param)} is set, consider also setting {format_param(recommended_param)}" + + +def required_error(param: str, context: Optional[str] = None) -> str: + """ + Create a missing required parameter error message. + + Args: + param: Required parameter name + context: Optional context (e.g., "when m > 0") + + Returns: + Formatted error message. + """ + if context: + return f"{format_param(param)} must be set {context}" + return f"{format_param(param)} must be set" + + +def mutual_exclusion_error(params: List[str], active: List[str]) -> str: + """ + Create a mutual exclusion error message. + + Args: + params: List of mutually exclusive parameters + active: List of parameters that are currently active (conflicting) + + Returns: + Formatted error message. + """ + formatted = [format_param(p) for p in params] + active_formatted = [format_param(p) for p in active] + return ( + f"Only one of {', '.join(formatted)} can be enabled, " + f"but {', '.join(active_formatted)} are all enabled" + ) + + +def dimension_error(param: str, requirement: str) -> str: + """ + Create a dimensionality constraint error. + + Args: + param: Parameter name + requirement: Description of the dimensional requirement + + Returns: + Formatted error message. + """ + return f"{format_param(param)}: {requirement}" + + +def unknown_param_error(param: str, suggestions: Optional[List[str]] = None) -> str: + """ + Create an error message for an unknown parameter with suggestions. + + Args: + param: The unknown parameter name. + suggestions: Optional list of similar valid parameter names. + + Returns: + Formatted error message with "Did you mean?" if suggestions available. 
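+
+    Example (illustrative, with a hypothetical misspelling):
+        unknown_param_error("weno_ordr", ["weno_order"])
+        -> "Unknown parameter 'weno_ordr'. Did you mean 'weno_order'?"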
+    """
+    base_msg = f"Unknown parameter {format_param(param)}"
+    if suggestions:
+        if len(suggestions) == 1:
+            return f"{base_msg}. Did you mean {format_param(suggestions[0])}?"
+        quoted = [format_param(s) for s in suggestions]
+        return f"{base_msg}. Did you mean one of: {', '.join(quoted)}?"
+    return base_msg
+
+
+def format_error_list(
+    errors: List[str],
+    warnings: Optional[List[str]] = None,
+    use_rich: bool = True,
+) -> str:
+    """
+    Format a list of errors and warnings for display.
+
+    Args:
+        errors: List of error messages
+        warnings: Optional list of warning messages
+        use_rich: Whether to use Rich markup for colors
+
+    Returns:
+        Formatted string with errors and warnings.
+    """
+    lines = []
+
+    if errors:
+        if use_rich:
+            lines.append("[red]Validation Errors:[/red]")
+            for err in errors:
+                lines.append(f" [red]✗[/red] {err}")
+        else:
+            lines.append("Validation Errors:")
+            for err in errors:
+                lines.append(f" ✗ {err}")
+
+    if warnings:
+        if lines:
+            lines.append("")
+        if use_rich:
+            lines.append("[yellow]Warnings:[/yellow]")
+            for warn in warnings:
+                lines.append(f" [yellow]![/yellow] {warn}")
+        else:
+            lines.append("Warnings:")
+            for warn in warnings:
+                lines.append(f" ! {warn}")
+
+    return "\n".join(lines)
diff --git a/toolchain/mfc/params/generators/__init__.py b/toolchain/mfc/params/generators/__init__.py
new file mode 100644
index 0000000000..b06572cf5c
--- /dev/null
+++ b/toolchain/mfc/params/generators/__init__.py
@@ -0,0 +1,6 @@
+"""Code Generators for Parameter Schema."""
+
+from .json_schema_gen import generate_json_schema
+from .docs_gen import generate_parameter_docs
+
+__all__ = ['generate_json_schema', 'generate_parameter_docs']
diff --git a/toolchain/mfc/params/generators/docs_gen.py b/toolchain/mfc/params/generators/docs_gen.py
new file mode 100644
index 0000000000..7699a12c08
--- /dev/null
+++ b/toolchain/mfc/params/generators/docs_gen.py
@@ -0,0 +1,302 @@
+"""
+Parameter Documentation Generator.
+
+Generates markdown documentation for all MFC case parameters,
+organized by family with descriptions, types, and constraints.
+"""
+
+from typing import Any, Dict, List, Tuple
+from collections import defaultdict
+import re
+
+from ..schema import ParamType
+from ..registry import REGISTRY
+from ..descriptions import get_description
+from .. import definitions  # noqa: F401 pylint: disable=unused-import
+
+
+def _get_family(name: str) -> str:
+    """Extract family name from parameter (e.g., 'patch_icpp' from 'patch_icpp(1)%vel(1)')."""
+    # Handle indexed parameters
+    match = re.match(r'^([a-z_]+)', name)
+    if match:
+        base = match.group(1)
+        # Indexed or derived-type names (base followed by "(" or "%") form a family
+        if name.startswith(f"{base}(") or name.startswith(f"{base}%"):
+            return base
+    return "general"
+
+
+def _escape_percent(s: str) -> str:
+    """Escape % for Doxygen (% is a special character, use %% to get literal %)."""
+    return s.replace('%', '%%')
+
+
+def _parse_paren_content(name: str, start: int) -> Tuple[str, int]:
+    """Parse content within parentheses, return (content, end_index) or ('', -1) if invalid."""
+    j = start + 1
+    paren_content = []
+    while j < len(name) and name[j] != ')':
+        paren_content.append(name[j])
+        j += 1
+    if j < len(name):
+        return ''.join(paren_content), j
+    return '', -1
+
+
+def _collapse_indices(name: str) -> str:
+    """
+    Collapse numeric indices to placeholders for pattern grouping.
+ + Examples: + patch_icpp(1)%vel(2) -> patch_icpp(N)%vel(M) + simplex_params%perturb_dens_offset(1, 2) -> simplex_params%perturb_dens_offset(N, M) + bc_x%vel_in(1) -> bc_x%vel_in(N) + """ + placeholders = ['N', 'M', 'K', 'L', 'P', 'Q'] + placeholder_idx = 0 + result = [] + i = 0 + + while i < len(name): + if name[i] != '(': + result.append(name[i]) + i += 1 + continue + + # Found opening paren, look for indices + content, end_idx = _parse_paren_content(name, i) + if end_idx == -1: + result.append(name[i]) + i += 1 + continue + + # Check if content is numeric indices (possibly comma-separated) + parts = [p.strip() for p in content.split(',')] + if not all(p.isdigit() for p in parts): + result.append(name[i]) + i += 1 + continue + + # Replace each index with a placeholder + new_parts = [] + for _ in parts: + ph = placeholders[placeholder_idx] if placeholder_idx < len(placeholders) else '?' + new_parts.append(ph) + placeholder_idx += 1 + result.append('(' + ', '.join(new_parts) + ')') + i = end_idx + 1 + + return ''.join(result) + + +def _type_to_str(param_type: ParamType) -> str: + """Convert ParamType to readable string.""" + return { + ParamType.INT: "Integer", + ParamType.REAL: "Real", + ParamType.LOG: "Logical (T/F)", + ParamType.STR: "String", + ParamType.ANALYTIC_INT: "Integer or Expression", + ParamType.ANALYTIC_REAL: "Real or Expression", + }.get(param_type, str(param_type)) + + +def _format_constraints(param) -> str: + """Format constraints as readable string.""" + if not param.constraints: + return "" + + parts = [] + c = param.constraints + if "choices" in c: + parts.append(f"Values: {c['choices']}") + if "min" in c: + parts.append(f"Min: {c['min']}") + if "max" in c: + parts.append(f"Max: {c['max']}") + + return ", ".join(parts) + + +def generate_parameter_docs() -> str: # pylint: disable=too-many-locals,too-many-statements + """Generate markdown documentation for all parameters.""" + lines = [ + "@page parameters Case Parameters Reference", + "", + "# Case Parameters Reference", + "", + "> **Auto-generated** from parameter registry", + "> ", + "> Regenerate with: `./mfc.sh generate --json-schema`", + "", + "## Overview", + "", + f"MFC supports **{len(REGISTRY.all_params):,}** case parameters organized into families.", + "", + "**Quick search:** Use `./mfc.sh params ` to search parameters from the command line.", + "", + "## Parameter Families", + "", + ] + + # Group parameters by family + families: Dict[str, List[Tuple[str, Any]]] = defaultdict(list) + for name, param in sorted(REGISTRY.all_params.items()): + family = _get_family(name) + families[family].append((name, param)) + + # Sort families by size (largest first), but put "general" last + sorted_families = sorted( + families.items(), + key=lambda x: (x[0] == "general", -len(x[1]), x[0]) + ) + + # Table of contents + lines.append("| Family | Count | Description |") + lines.append("|--------|-------|-------------|") + + family_descriptions = { + "general": "Core simulation parameters (grid, time, model, etc.)", + "patch_icpp": "Initial condition patch parameters", + "patch_ib": "Immersed boundary patch parameters", + "patch_bc": "Boundary condition patch parameters", + "fluid_pp": "Fluid material properties", + "acoustic": "Acoustic source parameters", + "bc_x": "X-direction boundary conditions", + "bc_y": "Y-direction boundary conditions", + "bc_z": "Z-direction boundary conditions", + "probe": "Probe/monitoring point parameters", + "integral": "Integral region parameters", + "simplex_params": "Simplex noise perturbation 
parameters", + "chem_wrt_Y": "Chemistry species output parameters", + "bub_pp": "Bubble property parameters", + "lag_params": "Lagrangian particle parameters", + # Post-processing output flags + "alpha_rho_wrt": "Partial density output flags", + "alpha_rho_e_wrt": "Partial density-energy output flags", + "alpha_wrt": "Volume fraction output flags", + "kappa_wrt": "Curvature output flags", + "schlieren_alpha": "Numerical schlieren coefficients", + "mom_wrt": "Momentum output flags", + "vel_wrt": "Velocity output flags", + "flux_wrt": "Flux output flags", + "omega_wrt": "Vorticity output flags", + # Domain and output regions + "x_domain": "X-direction domain parameters", + "y_domain": "Y-direction domain parameters", + "z_domain": "Z-direction domain parameters", + "x_output": "X-direction output region", + "y_output": "Y-direction output region", + "z_output": "Z-direction output region", + # Other + "fluid_rho": "Fluid reference densities", + "chem_params": "Chemistry model parameters", + } + + for family, params in sorted_families: + desc = family_descriptions.get(family, "") + # Use family name directly as anchor (GitHub keeps underscores in heading anchors) + lines.append(f"| [{family}](#{family}) | {len(params)} | {desc} |") + + lines.append("") + lines.append("---") + lines.append("") + + # Document each family + for family, params in sorted_families: + lines.append(f"## {family}") + lines.append("") + + desc = family_descriptions.get(family, "") + if desc: + lines.append(f"*{desc}*") + lines.append("") + + lines.append(f"**{len(params)} parameters**") + lines.append("") + + # Group by pattern (collapse indices to N, M, etc.) + patterns: Dict[str, List[str]] = defaultdict(list) + for name, _ in params: + pattern = _collapse_indices(name) + patterns[pattern].append(name) + + # Use pattern view if it reduces rows, otherwise show full table + if len(patterns) < len(params): + # Pattern view - shows collapsed patterns + lines.append("### Patterns") + lines.append("") + lines.append("| Pattern | Example | Description |") + lines.append("|---------|---------|-------------|") + + for pattern, examples in sorted(patterns.items()): + example = examples[0] + desc = get_description(example) or "" + # Truncate long descriptions + if len(desc) > 60: + desc = desc[:57] + "..." + # Escape % for Doxygen + pattern_escaped = _escape_percent(pattern) + example_escaped = _escape_percent(example) + lines.append(f"| `{pattern_escaped}` | `{example_escaped}` | {desc} |") + + lines.append("") + else: + # Full table - no patterns to collapse + lines.append("| Parameter | Type | Description |") + lines.append("|-----------|------|-------------|") + + for name, param in params: + type_str = _type_to_str(param.param_type) + desc = get_description(name) or "" + constraints = _format_constraints(param) + if constraints: + desc = f"{desc} ({constraints})" if desc else constraints + # Truncate long descriptions + if len(desc) > 80: + desc = desc[:77] + "..." 
+ # Escape % for Doxygen + name_escaped = _escape_percent(name) + lines.append(f"| `{name_escaped}` | {type_str} | {desc} |") + + lines.append("") + + lines.append("---") + lines.append("") + + # Add footer + lines.extend([ + "## Command Line Reference", + "", + "Search parameters using the CLI:", + "", + "```bash", + "# Search for parameters", + "./mfc.sh params weno", + "", + "# Show parameter descriptions", + "./mfc.sh params weno -d", + "", + "# List all families", + "./mfc.sh params -f", + "", + "# Filter by type", + "./mfc.sh params -t real weno", + "```", + "", + ]) + + return "\n".join(lines) + + +def write_parameter_docs(output_path: str) -> int: + """Write parameter documentation to file. + + Returns: + Number of parameters documented + """ + content = generate_parameter_docs() + with open(output_path, 'w') as f: + f.write(content) + return len(REGISTRY.all_params) diff --git a/toolchain/mfc/params/generators/json_schema_gen.py b/toolchain/mfc/params/generators/json_schema_gen.py new file mode 100644 index 0000000000..21a8feb8b1 --- /dev/null +++ b/toolchain/mfc/params/generators/json_schema_gen.py @@ -0,0 +1,128 @@ +""" +JSON Schema Generator for MFC Case Files. + +Generates VS Code / PyCharm compatible JSON Schema for case file auto-completion. +""" +# pylint: disable=import-outside-toplevel + +import json +from typing import Dict, Any +from ..schema import ParamType +from ..registry import REGISTRY +from .. import definitions # noqa: F401 pylint: disable=unused-import + + +def _param_type_to_json_schema(param_type: ParamType, constraints: Dict = None) -> Dict[str, Any]: + """Convert ParamType to JSON Schema type definition.""" + base_schemas = { + ParamType.INT: {"type": "integer"}, + ParamType.REAL: {"type": "number"}, + ParamType.LOG: {"type": "string", "enum": ["T", "F"]}, + ParamType.STR: {"type": "string"}, + # Analytic types allow strings (expressions) or their base type + ParamType.ANALYTIC_INT: {"oneOf": [{"type": "integer"}, {"type": "string"}]}, + ParamType.ANALYTIC_REAL: {"oneOf": [{"type": "number"}, {"type": "string"}]}, + } + + schema = base_schemas.get(param_type, {"type": "string"}).copy() + + # Add constraints + if constraints: + if "choices" in constraints and param_type in (ParamType.INT, ParamType.REAL): + schema["enum"] = constraints["choices"] + if "min" in constraints: + schema["minimum"] = constraints["min"] + if "max" in constraints: + schema["maximum"] = constraints["max"] + + return schema + + +def generate_json_schema(include_descriptions: bool = True) -> Dict[str, Any]: + """ + Generate JSON Schema for MFC case file parameters. 
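+
+    Logical parameters map to {"type": "string", "enum": ["T", "F"]}, and
+    integer or real parameters carrying a "choices" constraint additionally
+    gain a JSON "enum" of the allowed values (see _param_type_to_json_schema).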
+ + Args: + include_descriptions: Include parameter descriptions in schema + + Returns: + JSON Schema dict + """ + from ..descriptions import get_description + + properties = {} + all_params = [] + + for name, param in sorted(REGISTRY.all_params.items()): + prop_schema = _param_type_to_json_schema(param.param_type, param.constraints) + + if include_descriptions: + # Get description from descriptions module + desc = get_description(name) + if desc: + prop_schema["description"] = desc + + # Add deprecation notice if applicable + if param.dependencies and "deprecated" in param.dependencies: + prop_schema["deprecated"] = True + prop_schema["deprecationMessage"] = param.dependencies["deprecated"] + + properties[name] = prop_schema + all_params.append(name) + + schema = { + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://mflowcode.github.io/schemas/mfc-case.json", + "title": "MFC Case File Schema", + "description": "Schema for MFC (Multi-component Flow Code) simulation case parameters", + "type": "object", + "properties": properties, + "additionalProperties": False, + } + + return schema + + +def generate_vscode_settings() -> Dict[str, Any]: + """Generate VS Code settings snippet for JSON Schema association.""" + return { + "json.schemas": [ + { + "fileMatch": ["case.py", "**/case.py"], + "url": "./mfc-case-schema.json" + } + ], + "yaml.schemas": { + "./mfc-case-schema.json": ["case.yaml", "**/case.yaml"] + } + } + + +def write_json_schema(output_path: str, include_descriptions: bool = True) -> None: + """ + Write JSON Schema to file. + + Args: + output_path: Path to write schema file + include_descriptions: Include parameter descriptions + """ + schema = generate_json_schema(include_descriptions) + + with open(output_path, 'w') as f: + json.dump(schema, f, indent=2) + + +def get_schema_stats() -> Dict[str, int]: + """Get statistics about the generated schema.""" + from ..descriptions import get_description + + schema = generate_json_schema(include_descriptions=False) + props = schema.get("properties", {}) + + stats = { + "total_params": len(props), + "with_constraints": sum(1 for p in props.values() if "enum" in p or "minimum" in p or "maximum" in p), + "with_descriptions": sum(1 for name in REGISTRY.all_params if get_description(name)), + } + + return stats diff --git a/toolchain/mfc/params/namelist_parser.py b/toolchain/mfc/params/namelist_parser.py new file mode 100644 index 0000000000..391298cc2d --- /dev/null +++ b/toolchain/mfc/params/namelist_parser.py @@ -0,0 +1,167 @@ +""" +Parse Fortran namelist definitions to extract valid parameters for each target. + +This module reads the Fortran source files and extracts the parameter names +from each target's namelist definition. This ensures the Python toolchain +stays in sync with what the Fortran code actually accepts. +""" + +import re +from pathlib import Path +from typing import Dict, Set + + +def parse_namelist_from_file(filepath: Path) -> Set[str]: + """ + Parse a Fortran file and extract parameter names from the namelist definition. 
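+
+    For example, a namelist block such as
+
+        namelist /user_inputs/ m, n, p, &
+            dt, t_step_start
+
+    would yield {'m', 'n', 'p', 'dt', 't_step_start'}.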
+ + Args: + filepath: Path to the Fortran source file (m_start_up.fpp) + + Returns: + Set of parameter names found in the namelist + """ + content = filepath.read_text() + + # Find the namelist block - starts with "namelist /user_inputs/" + # and continues until a line without continuation (&) or a blank line + namelist_match = re.search( + r'namelist\s+/user_inputs/\s*(.+?)(?=\n\s*\n|\n\s*!(?!\s*&)|\n\s*[a-zA-Z_]+\s*=)', + content, + re.DOTALL | re.IGNORECASE + ) + + if not namelist_match: + raise ValueError(f"Could not find namelist /user_inputs/ in {filepath}") + + namelist_text = namelist_match.group(1) + + # Remove Fortran line continuations (&) and join lines + namelist_text = re.sub(r'&\s*\n\s*', ' ', namelist_text) + + # Remove preprocessor directives (#:if, #:endif, etc.) + namelist_text = re.sub(r'#:.*', '', namelist_text) + + # Remove comments (! to end of line, but not inside strings) + namelist_text = re.sub(r'!.*', '', namelist_text) + + # Extract parameter names - they're comma-separated identifiers + # Parameter names are alphanumeric with underscores + found_params = set() + for match in re.finditer(r'\b([a-zA-Z_][a-zA-Z0-9_]*)\b', namelist_text): + name = match.group(1) + # Skip Fortran keywords that might appear + if name.lower() not in {'namelist', 'user_inputs', 'if', 'endif', 'not'}: + found_params.add(name) + + return found_params + + +def parse_all_namelists(mfc_root: Path) -> Dict[str, Set[str]]: + """ + Parse namelist definitions from all MFC targets. + + Args: + mfc_root: Path to MFC root directory + + Returns: + Dict mapping target name to set of valid parameter names + """ + targets = { + 'pre_process': mfc_root / 'src' / 'pre_process' / 'm_start_up.fpp', + 'simulation': mfc_root / 'src' / 'simulation' / 'm_start_up.fpp', + 'post_process': mfc_root / 'src' / 'post_process' / 'm_start_up.fpp', + } + + result = {} + for target_name, filepath in targets.items(): + if not filepath.exists(): + raise FileNotFoundError(f"Fortran source not found: {filepath}") + result[target_name] = parse_namelist_from_file(filepath) + + return result + + +def get_mfc_root() -> Path: + """Get the MFC root directory from this file's location.""" + # This file is at toolchain/mfc/params/namelist_parser.py + # MFC root is 4 levels up + return Path(__file__).resolve().parent.parent.parent.parent + + +# Module-level cache for parsed target params +_TARGET_PARAMS_CACHE: Dict[str, Set[str]] = {} + + +def get_target_params() -> Dict[str, Set[str]]: + """ + Get the valid parameters for each target, parsing Fortran if needed. + + Returns: + Dict mapping target name to set of valid parameter names + """ + if not _TARGET_PARAMS_CACHE: + _TARGET_PARAMS_CACHE.update(parse_all_namelists(get_mfc_root())) + return _TARGET_PARAMS_CACHE + + +def is_param_valid_for_target(param_name: str, target_name: str) -> bool: + """ + Check if a parameter is valid for a given target. + + This handles both scalar params (like "m") and indexed params + (like "patch_icpp(1)%geometry") by checking the base name. 
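+
+    Example (illustrative):
+        is_param_valid_for_target("fluid_pp(2)%gamma", "simulation")
+        # True exactly when "fluid_pp" is in the simulation namelist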
+ + Args: + param_name: The parameter name (may include indices like "(1)%attr") + target_name: One of 'pre_process', 'simulation', 'post_process' + + Returns: + True if the parameter is valid for the target + """ + valid_params = get_target_params().get(target_name, set()) + + # Extract base parameter name (before any index or attribute) + # e.g., "patch_icpp(1)%geometry" -> "patch_icpp" + # e.g., "fluid_pp(2)%gamma" -> "fluid_pp" + base_match = re.match(r'^([a-zA-Z_][a-zA-Z0-9_]*)', param_name) + if base_match: + return base_match.group(1) in valid_params + + return param_name in valid_params + + +if __name__ == '__main__': + # Test the parser + import sys + + try: + parsed_targets = parse_all_namelists(get_mfc_root()) + + print("Parsed namelist parameters:\n") + for tgt, tgt_params in sorted(parsed_targets.items()): + print(f"{tgt}: {len(tgt_params)} parameters") + # Print first 10 as sample + sorted_list = sorted(tgt_params) + for param in sorted_list[:10]: + print(f" - {param}") + if len(tgt_params) > 10: + print(f" ... and {len(tgt_params) - 10} more") + print() + + # Show params unique to each target + print("Parameters unique to each target:\n") + all_param_names = set.union(*parsed_targets.values()) + for tgt, tgt_params in sorted(parsed_targets.items()): + other = set.union(*[p for t, p in parsed_targets.items() if t != tgt]) + unique = tgt_params - other + print(f"{tgt} only ({len(unique)}): {sorted(unique)[:15]}...") + print() + + # Show params in all targets + common = set.intersection(*parsed_targets.values()) + print(f"Parameters in ALL targets ({len(common)}): {sorted(common)[:20]}...") + + except Exception as exc: + print(f"Error: {exc}", file=sys.stderr) + sys.exit(1) diff --git a/toolchain/mfc/params/registry.py b/toolchain/mfc/params/registry.py new file mode 100644 index 0000000000..f6f66b3176 --- /dev/null +++ b/toolchain/mfc/params/registry.py @@ -0,0 +1,192 @@ +""" +Parameter Registry. + +Central storage for MFC parameter definitions. This module provides the +ParamRegistry class which serves as the single source of truth for all +~3,300 MFC parameters. + +Usage +----- +The global REGISTRY instance is populated by importing the definitions module. +Once populated, parameters can be queried by name or by tag: + + from mfc.params import REGISTRY + + # Get a specific parameter + param = REGISTRY.all_params.get('m') + + # Get parameters by feature tag + mhd_params = REGISTRY.get_params_by_tag('mhd') + +Thread Safety +------------- +The registry is populated once at import time and frozen (made immutable). +After freezing, it is safe to read from multiple threads. Attempts to +register new parameters after freezing will raise RuntimeError. +""" + +from typing import Dict, Set, Mapping, Any +from types import MappingProxyType +from collections import defaultdict +from functools import lru_cache + +from .schema import ParamDef + + +class RegistryFrozenError(RuntimeError): + """Raised when attempting to modify a frozen registry.""" + + +class ParamRegistry: + """ + Central registry for MFC parameters. + + This class stores parameter definitions and provides lookup methods + for retrieving parameters by name or by feature tag. + + The registry can be frozen after initialization to prevent further + modifications, ensuring thread-safety for read operations. + + Attributes: + _params: Dictionary mapping parameter names to ParamDef instances. + _by_tag: Dictionary mapping tags to sets of parameter names. + _frozen: Whether the registry has been frozen (immutable). 
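+
+    Example (sketch):
+
+        registry = ParamRegistry()
+        registry.register(ParamDef(name="m", param_type=ParamType.INT))
+        registry.freeze()
+        registry.register(...)   # raises RegistryFrozenError once frozen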
+ """ + + def __init__(self): + """Initialize an empty registry.""" + self._params: Dict[str, ParamDef] = {} + self._by_tag: Dict[str, Set[str]] = defaultdict(set) + self._frozen: bool = False + self._params_proxy: Mapping[str, ParamDef] = None + + def freeze(self) -> None: + """ + Freeze the registry, preventing further modifications. + + After calling this method: + - register() will raise RegistryFrozenError + - all_params returns a read-only view (MappingProxyType) + + This method is idempotent (safe to call multiple times). + """ + if not self._frozen: + self._frozen = True + self._params_proxy = MappingProxyType(self._params) + + @property + def is_frozen(self) -> bool: + """Return True if the registry has been frozen.""" + return self._frozen + + def register(self, param: ParamDef) -> None: + """ + Register a parameter definition. + + If a parameter with the same name already exists, the tags are + merged. This allows parameters to be defined incrementally across + multiple definition files. + + Args: + param: The parameter definition to register. + + Raises: + RegistryFrozenError: If the registry has been frozen. + ValueError: If a parameter with the same name exists but has + a different type (type mismatch is not allowed). + """ + if self._frozen: + raise RegistryFrozenError( + f"Cannot register '{param.name}': registry is frozen. " + "All parameters must be registered during module initialization." + ) + + if param.name in self._params: + existing = self._params[param.name] + if existing.param_type != param.param_type: + raise ValueError(f"Type mismatch for '{param.name}'") + existing.tags.update(param.tags) + for tag in param.tags: + self._by_tag[tag].add(param.name) + return + + self._params[param.name] = param + for tag in param.tags: + self._by_tag[tag].add(param.name) + + @property + def all_params(self) -> Mapping[str, ParamDef]: + """ + Get all registered parameters. + + Returns: + Mapping of parameter names to their definitions. + If the registry is frozen, returns a read-only view. + If not frozen, returns the internal dict (mutable). + """ + if self._frozen and self._params_proxy is not None: + return self._params_proxy + return self._params + + def get_params_by_tag(self, tag: str) -> Dict[str, ParamDef]: + """ + Get parameters with a specific feature tag. + + Args: + tag: The feature tag (e.g., "mhd", "bubbles", "weno"). + + Returns: + Dictionary mapping parameter names to their definitions. + """ + return {name: self._params[name] for name in self._by_tag.get(tag, set())} + + def get_all_tags(self) -> Set[str]: + """ + Get all feature tags used in the registry. + + Returns: + Set of all tag names. + """ + return set(self._by_tag.keys()) + + def get_json_schema(self) -> Dict[str, Any]: + """ + Generate JSON schema for case file validation. + + Returns: + JSON schema dict compatible with fastjsonschema. + """ + properties = { + name: param.param_type.json_schema + for name, param in self.all_params.items() + } + + return { + "type": "object", + "properties": properties, + "additionalProperties": False + } + + def get_validator(self): + """ + Get a cached JSON schema validator for all parameters. + + Returns: + Compiled fastjsonschema validator function. + """ + # Use module-level cache since registry is frozen + return _get_cached_validator(id(self)) + + +@lru_cache(maxsize=1) +def _get_cached_validator(registry_id: int): # pylint: disable=unused-argument + """Cache the validator at module level (registry is immutable after freeze). 
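+
+    Caveat: CPython can reuse an id() once its object has been garbage
+    collected, so the key only distinguishes registries that are alive at
+    the same time; the module-level REGISTRY satisfies that assumption.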
+ + Note: registry_id is used as cache key to invalidate when registry changes. + """ + import fastjsonschema # pylint: disable=import-outside-toplevel + return fastjsonschema.compile(REGISTRY.get_json_schema()) + + +# Global registry instance - populated when definitions module is imported +REGISTRY = ParamRegistry() diff --git a/toolchain/mfc/params/schema.py b/toolchain/mfc/params/schema.py new file mode 100644 index 0000000000..4d282c08e0 --- /dev/null +++ b/toolchain/mfc/params/schema.py @@ -0,0 +1,101 @@ +""" +Parameter Schema Definitions. + +Provides ParamDef for parameter metadata, constraints, and dependencies. +""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Set, Any, Optional, Dict, List + +from .errors import constraint_error + + +class ParamType(Enum): + """Parameter types matching MFC's Fortran types with JSON schema support.""" + INT = "int" + REAL = "real" + LOG = "log" + STR = "str" + ANALYTIC_INT = "analytic:int" + ANALYTIC_REAL = "analytic:real" + + @property + def json_schema(self) -> Dict[str, Any]: + """ + Return JSON schema fragment for this parameter type. + + Used by fastjsonschema for case file validation. + """ + schemas = { + ParamType.INT: {"type": "integer"}, + ParamType.REAL: {"type": "number"}, + ParamType.LOG: {"enum": ["T", "F"]}, + ParamType.STR: {"type": "string"}, + # Analytic types accept either the base type or a string expression + ParamType.ANALYTIC_INT: {"type": ["integer", "string"]}, + ParamType.ANALYTIC_REAL: {"type": ["number", "string"]}, + } + return schemas[self] + + +@dataclass +class ParamDef: + """ + Definition of a single MFC parameter. + + Attributes: + name: Parameter name + param_type: Type (INT, REAL, LOG, STR, ANALYTIC_*) + description: Human-readable description + case_optimization: Can be hard-coded for GPU builds + constraints: Validation constraints (choices, min, max) + dependencies: Related params (requires, recommends) + tags: Feature tags for grouping (e.g., "mhd", "bubbles", "weno") + """ + name: str + param_type: ParamType + description: str = "" + case_optimization: bool = False + constraints: Optional[Dict[str, Any]] = None # {"choices": [...], "min": N, "max": N} + dependencies: Optional[Dict[str, Any]] = None # {"requires": [...], "recommends": [...]} + tags: Set[str] = field(default_factory=set) # Feature tags: "mhd", "bubbles", etc. + + def __post_init__(self): + # Validate name + if not self.name or not isinstance(self.name, str): + raise ValueError("ParamDef name must be a non-empty string") + + @property + def type_tag(self) -> str: + return self.param_type.value + + def validate_value(self, value: Any) -> List[str]: + """ + Validate a value against this parameter's constraints. + + Returns list of error messages (empty if valid). + Uses consistent error formatting from errors.py. 
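+
+        Example: with constraints {"choices": [1, 2]}, validate_value(5)
+        returns a single "choices" error and validate_value(1) returns [].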
+ """ + errors = [] + if self.constraints is None: + return errors + + # Check choices constraint + if "choices" in self.constraints: + choices = self.constraints["choices"] + if value not in choices: + errors.append(constraint_error(self.name, "choices", choices, value)) + + # Check numeric range constraints (only for numeric values, not analytic strings) + if isinstance(value, (int, float)): + if "min" in self.constraints and value < self.constraints["min"]: + errors.append( + constraint_error(self.name, "min", self.constraints["min"], value) + ) + if "max" in self.constraints and value > self.constraints["max"]: + errors.append( + constraint_error(self.name, "max", self.constraints["max"], value) + ) + + return errors diff --git a/toolchain/mfc/params/suggest.py b/toolchain/mfc/params/suggest.py new file mode 100644 index 0000000000..6f46107b33 --- /dev/null +++ b/toolchain/mfc/params/suggest.py @@ -0,0 +1,146 @@ +""" +Fuzzy Matching for Parameter Suggestions. + +Provides "did you mean?" functionality for typo detection using rapidfuzz +for fast string matching. + +Primary use case: When users mistype parameter names in case files, suggest +the correct parameter name (e.g., "model_eqn" -> "Did you mean 'model_eqns'?"). + +Also used internally to validate constraint/dependency schemas during +module initialization, catching developer typos in CONSTRAINTS/DEPENDENCIES dicts. +""" + +from typing import List, Iterable +from functools import lru_cache + +# Import rapidfuzz - falls back gracefully if not installed +try: + from rapidfuzz import process, fuzz + RAPIDFUZZ_AVAILABLE = True +except ImportError: + RAPIDFUZZ_AVAILABLE = False + +# Minimum similarity score (0-100) to consider a match +MIN_SIMILARITY_SCORE = 60 + +# Maximum number of suggestions to return +MAX_SUGGESTIONS = 3 + + +def suggest_similar( + unknown: str, + valid_options: Iterable[str], + min_score: int = MIN_SIMILARITY_SCORE, + max_suggestions: int = MAX_SUGGESTIONS, +) -> List[str]: + """ + Find similar strings from valid_options that match the unknown string. + + Uses rapidfuzz for fast fuzzy string matching. Falls back to empty list + if rapidfuzz is not available. + + Args: + unknown: The unknown/misspelled string to match. + valid_options: Iterable of valid strings to match against. + min_score: Minimum similarity score (0-100) to include a match. + max_suggestions: Maximum number of suggestions to return. + + Returns: + List of similar valid options, sorted by similarity (best first). + Empty list if no good matches found or rapidfuzz not available. + """ + if not RAPIDFUZZ_AVAILABLE: + return [] + + if not unknown or not valid_options: + return [] + + # Convert to list if needed (rapidfuzz needs indexable sequence) + options_list = list(valid_options) + if not options_list: + return [] + + # Use rapidfuzz to find best matches + # process.extract returns list of (match, score, index) tuples + matches = process.extract( + unknown, + options_list, + scorer=fuzz.WRatio, # Weighted ratio handles partial matches well + limit=max_suggestions, + score_cutoff=min_score, + ) + + return [match[0] for match in matches] + + +def format_suggestion(suggestions: List[str]) -> str: + """ + Format a "did you mean?" suggestion message. + + Args: + suggestions: List of suggested alternatives. + + Returns: + Formatted suggestion string, or empty string if no suggestions. + """ + if not suggestions: + return "" + + if len(suggestions) == 1: + return f"Did you mean '{suggestions[0]}'?" 
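+    # Several candidates: quote each and fold them into one question,
+    # e.g. "Did you mean one of: 'm', 'n', 'p'?"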
+ quoted = [f"'{s}'" for s in suggestions] + return f"Did you mean one of: {', '.join(quoted)}?" + + +def suggest_parameter(unknown_param: str) -> List[str]: + """ + Suggest similar parameter names from the registry. + + Args: + unknown_param: Unknown parameter name. + + Returns: + List of similar valid parameter names. + """ + # Import here to avoid circular import (registry imports definitions which may use suggest) + from .registry import REGISTRY # pylint: disable=import-outside-toplevel + + return suggest_similar(unknown_param, REGISTRY.all_params.keys()) + + +@lru_cache(maxsize=128) +def get_param_suggestions_cached(unknown_param: str) -> tuple: + """ + Cached version of suggest_parameter for repeated lookups. + + Returns tuple for hashability in cache. + """ + return tuple(suggest_parameter(unknown_param)) + + +def invalid_key_error( + context: str, + invalid_key: str, + valid_keys: Iterable[str], +) -> str: + """ + Create an error message for an invalid key with suggestions. + + Args: + context: Description of what the key is for (e.g., "constraint", "dependency"). + invalid_key: The invalid key that was used. + valid_keys: The set of valid keys. + + Returns: + Error message with valid keys listed and "did you mean?" if applicable. + """ + valid_set = set(valid_keys) + suggestions = suggest_similar(invalid_key, valid_set) + suggestion_text = format_suggestion(suggestions) + + base_msg = f"Invalid {context} key '{invalid_key}'. Valid keys are: {sorted(valid_set)}" + + if suggestion_text: + return f"{base_msg}. {suggestion_text}" + return base_msg diff --git a/toolchain/mfc/params/validate.py b/toolchain/mfc/params/validate.py new file mode 100644 index 0000000000..b004ed4797 --- /dev/null +++ b/toolchain/mfc/params/validate.py @@ -0,0 +1,197 @@ +""" +Parameter Validation with Constraints and Dependencies. + +Provides enhanced validation beyond JSON schema type checking. + +Relationship to case_validator.py +--------------------------------- +This module (params/validate.py) provides **generic parameter validation**: +- Type checking (int, real, string, etc.) +- Range constraints (min/max values) +- Choice validation (enum-like constraints) +- Parameter dependencies (requires/recommends) + +The case_validator.py module provides **domain-specific physics validation**: +- Cross-parameter consistency checks (e.g., bubble model + polytropic settings) +- Model-specific requirements (e.g., WENO order constraints) +- Stage-specific validation (pre_process, simulation, post_process) +- 50+ physics-aware constraint checks + +These modules are complementary: +- params/validate.py: Fast, generic checks that apply to all parameters +- case_validator.py: Comprehensive physics validation for MFC simulations + +Typical usage: + 1. JSON schema validation (via mfc-case-schema.json) + 2. Generic constraint validation (via this module) + 3. Physics validation (via case_validator.py) +""" + +from typing import Dict, Any, List, Tuple +from .registry import REGISTRY +from .errors import ( + dependency_error, + dependency_recommendation, + format_error_list, + unknown_param_error, +) +from .suggest import suggest_parameter +# Note: definitions is imported by params/__init__.py to populate REGISTRY. +# This redundant import ensures REGISTRY is populated even if this module +# is imported directly (e.g., during testing). +from . 
import definitions # noqa: F401 pylint: disable=unused-import + + +def check_unknown_params(params: Dict[str, Any]) -> List[str]: + """ + Check for unknown parameters and suggest corrections. + + Uses fuzzy matching via rapidfuzz to provide "Did you mean?" suggestions + for parameter names that don't exist in the registry. + + Args: + params: Dictionary of parameter name -> value + + Returns: + List of error messages for unknown parameters with suggestions. + """ + errors = [] + + for name in params.keys(): + if name not in REGISTRY.all_params: + suggestions = suggest_parameter(name) + errors.append(unknown_param_error(name, suggestions)) + + return errors + + +def validate_constraints(params: Dict[str, Any]) -> List[str]: + """ + Validate parameter values against their constraints. + + Args: + params: Dictionary of parameter name -> value + + Returns: + List of error messages (empty if all valid) + """ + errors = [] + + for name, value in params.items(): + param_def = REGISTRY.all_params.get(name) + if param_def is None: + continue # Unknown params handled by check_unknown_params + + # Skip analytic expressions (strings for numeric params) + if isinstance(value, str) and param_def.param_type.value.startswith("analytic"): + continue + if isinstance(value, str) and param_def.param_type.value in ("int", "real"): + # This is an analytic expression, skip constraint check + continue + + param_errors = param_def.validate_value(value) + errors.extend(param_errors) + + return errors + + +def check_dependencies(params: Dict[str, Any]) -> Tuple[List[str], List[str]]: # pylint: disable=too-many-branches + """ + Check parameter dependencies. + + Args: + params: Dictionary of parameter name -> value + + Returns: + Tuple of (errors, warnings) + - errors: Missing required params + - warnings: Missing recommended params + """ + errors = [] + warnings = [] + + for name, value in params.items(): + param_def = REGISTRY.all_params.get(name) + if param_def is None or param_def.dependencies is None: + continue + + deps = param_def.dependencies + + # Check "when_true" dependencies (for LOG params set to "T") + if "when_true" in deps and value == "T": + when_true = deps["when_true"] + + # Required params + if "requires" in when_true: + for req in when_true["requires"]: + if req not in params: + errors.append(dependency_error(name, req, "=T")) + + # Recommended params + if "recommends" in when_true: + for rec in when_true["recommends"]: + if rec not in params: + warnings.append(dependency_recommendation(name, rec, "=T")) + + # Check "when_set" dependencies (for any param that's set) + if "when_set" in deps: + when_set = deps["when_set"] + + if "requires" in when_set: + for req in when_set["requires"]: + if req not in params: + errors.append(dependency_error(name, req)) + + if "recommends" in when_set: + for rec in when_set["recommends"]: + if rec not in params: + warnings.append(dependency_recommendation(name, rec)) + + return errors, warnings + + +def validate_case( + params: Dict[str, Any], + warn: bool = True, + check_unknown: bool = True, +) -> Tuple[List[str], List[str]]: + """ + Full validation of case parameters. + + Args: + params: Dictionary of parameter name -> value + warn: Whether to check for warnings (recommended params) + check_unknown: Whether to check for unknown parameters + + Returns: + Tuple of (errors, warnings) + """ + errors = [] + warnings = [] + + # Check for unknown parameters with "did you mean?" 
suggestions
+    if check_unknown:
+        unknown_errors = check_unknown_params(params)
+        errors.extend(unknown_errors)
+
+    # Check constraints
+    constraint_errors = validate_constraints(params)
+    errors.extend(constraint_errors)
+
+    # Check dependencies
+    if warn:
+        dep_errors, dep_warnings = check_dependencies(params)
+        errors.extend(dep_errors)
+        warnings.extend(dep_warnings)
+
+    return errors, warnings
+
+
+def format_validation_results(errors: List[str], warnings: List[str]) -> str:
+    """
+    Format validation results for display.
+
+    Uses the centralized formatting from errors.py for consistency
+    across all validation systems.
+    """
+    return format_error_list(errors, warnings, use_rich=True)
diff --git a/toolchain/mfc/params_cmd.py b/toolchain/mfc/params_cmd.py
new file mode 100644
index 0000000000..a25b104c26
--- /dev/null
+++ b/toolchain/mfc/params_cmd.py
@@ -0,0 +1,416 @@
+"""
+MFC Parameter Search and Discovery Command.
+
+Provides CLI access to search and explore MFC's ~3,300 case parameters.
+"""
+# pylint: disable=import-outside-toplevel
+
+import re
+from .state import ARG
+from .printer import cons
+
+
+def params():
+    """Execute the params command based on CLI arguments."""
+    from .params import REGISTRY
+    from .params import definitions  # noqa: F401 pylint: disable=unused-import
+
+    query = ARG("query")
+    type_filter = ARG("param_type")
+    show_families = ARG("families")
+    show_features = ARG("features")
+    feature_name = ARG("feature")
+    names_only = ARG("names_only")
+    show_count = ARG("count")
+    limit = ARG("limit")
+    describe = ARG("describe")
+
+    # By default, search both names and descriptions (more user-friendly)
+    search_descriptions = not names_only
+
+    if show_count:
+        _show_statistics(REGISTRY)
+    elif show_features:
+        _show_feature_groups(REGISTRY)
+    elif feature_name:
+        _show_feature_params(REGISTRY, feature_name, type_filter, limit, describe)
+    elif show_families:
+        _show_families(REGISTRY, limit)
+    elif query:
+        _search_params(REGISTRY, query, type_filter, limit, describe, search_descriptions)
+    else:
+        _show_statistics(REGISTRY)
+        cons.print()
+        cons.print("[yellow]Tip:[/yellow] Use './mfc.sh params <query>' to search for parameters")
+        cons.print("     Use './mfc.sh params --feature mhd' to see MHD parameters")
+        cons.print("     Use './mfc.sh params -F' to see all feature groups")
+
+
+def _collapse_indexed_params(matches):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+    """
+    Collapse indexed parameters into patterns.
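+
+    For example, matches for patch_icpp(1)%alpha(1) through
+    patch_icpp(3)%alpha(2) all collapse to the single pattern
+    "patch_icpp(N)%alpha(M)" with index ranges N=1..3, M=1..2.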
+
+    Handles multiple index patterns:
+    - Suffix index: bc_z%alpha_in(1) -> bc_z%alpha_in(N)
+    - Prefix index: patch_icpp(1)%geometry -> patch_icpp(N)%geometry
+    - Both: patch_icpp(1)%alpha(1) -> patch_icpp(N)%alpha(M)
+    """
+    # Patterns for different index positions
+    # Pattern 1: prefix(N)%suffix or prefix(N)%suffix(M)
+    prefix_pattern = re.compile(r'^([^(]+)\((\d+)\)%(.+)$')
+    # Pattern 2: name(N) or name(N, M) at end
+    suffix_pattern = re.compile(r'^(.+)\((\d+)(?:,\s*(\d+))?\)$')
+
+    # Two-level grouping: first by base pattern (with indices replaced), then collect indices
+    groups = {}  # base_pattern -> {'indices': [(indices_key, param), ...], 'param_type': ParamType}
+
+    for name, param in matches:
+        # Try prefix pattern first: patch_icpp(1)%geometry
+        prefix_match = prefix_pattern.match(name)
+        if prefix_match:
+            prefix = prefix_match.group(1)
+            idx1 = int(prefix_match.group(2))
+            suffix = prefix_match.group(3)
+
+            # Check if suffix also has an index
+            suffix_match = suffix_pattern.match(suffix)
+            if suffix_match:
+                suffix_base = suffix_match.group(1)
+                idx2 = int(suffix_match.group(2))
+                idx3 = int(suffix_match.group(3)) if suffix_match.group(3) else None
+                # Pattern: prefix(N)%suffix_base(M) or prefix(N)%suffix_base(M, K)
+                if idx3 is not None:
+                    base_pattern = f"{prefix}(N)%{suffix_base}(M, K)"
+                    indices_key = (idx1, idx2, idx3)
+                else:
+                    base_pattern = f"{prefix}(N)%{suffix_base}(M)"
+                    indices_key = (idx1, idx2, None)
+            else:
+                # Pattern: prefix(N)%suffix
+                base_pattern = f"{prefix}(N)%{suffix}"
+                indices_key = (idx1, None, None)
+
+            if base_pattern not in groups:
+                groups[base_pattern] = {
+                    'indices': [],
+                    'param_type': param.param_type,
+                }
+            groups[base_pattern]['indices'].append((indices_key, param))
+            continue
+
+        # Try suffix-only pattern: name(N) or name(N, M)
+        suffix_match = suffix_pattern.match(name)
+        if suffix_match:
+            base = suffix_match.group(1)
+            idx1 = int(suffix_match.group(2))
+            idx2 = int(suffix_match.group(3)) if suffix_match.group(3) else None
+
+            if idx2 is not None:
+                base_pattern = f"{base}(N, M)"
+                indices_key = (idx1, idx2, None)
+            else:
+                base_pattern = f"{base}(N)"
+                indices_key = (idx1, None, None)
+
+            if base_pattern not in groups:
+                groups[base_pattern] = {
+                    'indices': [],
+                    'param_type': param.param_type,
+                }
+            groups[base_pattern]['indices'].append((indices_key, param))
+            continue
+
+        # No index pattern - add as-is
+        if name not in groups:
+            groups[name] = {
+                'indices': [(None, param)],
+                'param_type': param.param_type,
+            }
+        else:
+            groups[name]['indices'].append((None, param))
+
+    # Build collapsed results
+    collapsed = []
+
+    for pattern, data in sorted(groups.items()):
+        indices = data['indices']
+        param = indices[0][1]  # Get param from first entry
+        count = len(indices)
+
+        if count == 1 and indices[0][0] is None:
+            # Non-indexed parameter
+            collapsed.append((pattern, param, 1))
+        elif count == 1:
+            # Single indexed parameter - show with actual index
+            idx_tuple = indices[0][0]
+            # Reconstruct the actual name
+            actual_name = pattern
+            if idx_tuple[0] is not None:
+                actual_name = actual_name.replace('(N)', f'({idx_tuple[0]})', 1)
+            if idx_tuple[1] is not None:
+                actual_name = actual_name.replace('(M)', f'({idx_tuple[1]})', 1)
+            if idx_tuple[2] is not None:
+                actual_name = actual_name.replace('(K)', f'({idx_tuple[2]})', 1)
+            collapsed.append((actual_name, param, 1))
+        else:
+            # Multiple indices - build range string
+            range_parts = []
+
+            # Extract index values
+            idx1_vals = sorted(set(idx[0] for idx, _ in indices if idx and idx[0] is not None))
+            idx2_vals = sorted(set(idx[1] for idx, _ in indices if idx and idx[1] is not None))
+            idx3_vals = sorted(set(idx[2] for idx, _ in indices if idx and idx[2] is not None))
+
+            # Build a "min..max" range string for each index position in use
+            if idx1_vals:
+                range_parts.append(f"N={min(idx1_vals)}..{max(idx1_vals)}")
+            if idx2_vals:
+                range_parts.append(f"M={min(idx2_vals)}..{max(idx2_vals)}")
+            if idx3_vals:
+                range_parts.append(f"K={min(idx3_vals)}..{max(idx3_vals)}")
+
+            range_str = ", ".join(range_parts) if range_parts else ""
+            collapsed.append((pattern, param, count, range_str))
+
+    # Sort by name
+    collapsed.sort(key=lambda x: x[0])
+
+    return collapsed
+
+
+def _show_statistics(registry):
+    """Show parameter count statistics."""
+    cons.print("[bold]MFC Parameter Statistics[/bold]")
+    cons.print()
+
+    cons.print(f"  Total parameters: [cyan]{len(registry.all_params)}[/cyan]")
+
+    # Count by type
+    by_type = {}
+    for param in registry.all_params.values():
+        tname = param.param_type.name
+        by_type[tname] = by_type.get(tname, 0) + 1
+
+    cons.print()
+    cons.print("  By type:")
+    for tname, count in sorted(by_type.items(), key=lambda x: -x[1]):
+        cons.print(f"    {tname:15} {count:5}")
+
+
+def _show_feature_groups(registry):
+    """Show available feature groups."""
+    from .params.descriptions import FEATURE_DESCRIPTIONS
+
+    cons.print("[bold]Feature Groups[/bold]")
+    cons.print()
+    cons.print("  Use './mfc.sh params --feature <name>' to see parameters for a feature.")
+    cons.print()
+    cons.print(f"  {'Feature':<20} {'Description'}")
+    cons.print(f"  {'-'*20} {'-'*50}")
+
+    # Get all tags from registry and show with descriptions
+    all_tags = registry.get_all_tags()
+    for tag in sorted(all_tags):
+        desc = FEATURE_DESCRIPTIONS.get(tag, "")
+        cons.print(f"  [cyan]{tag:<20}[/cyan] {desc}")
+
+    cons.print()
+    cons.print("[yellow]Example:[/yellow] ./mfc.sh params --feature mhd")
+
+
+def _show_feature_params(registry, feature_name, type_filter, limit, describe):
+    """Show all parameters for a feature group."""
+    from .params.descriptions import FEATURE_DESCRIPTIONS
+
+    # Check if feature exists in registry
+    all_tags = registry.get_all_tags()
+    if feature_name not in all_tags:
+        cons.print(f"[red]Unknown feature group: '{feature_name}'[/red]")
+        cons.print()
+        cons.print("Available feature groups:")
+        for name in sorted(all_tags):
+            cons.print(f"  {name}")
+        return
+
+    # Get params by tag from registry (single source of truth)
+    tagged_params = registry.get_params_by_tag(feature_name)
+
+    # Build matches list
+    matches = []
+    for name, param in tagged_params.items():
+        # Apply type filter
+        if type_filter and param.param_type.value != type_filter:
+            continue
+        matches.append((name, param))
+
+    if not matches:
+        cons.print(f"[yellow]No parameters found for feature '{feature_name}'[/yellow]")
+        return
+
+    # Collapse indexed parameters
+    collapsed = _collapse_indexed_params(matches)
+
+    desc = FEATURE_DESCRIPTIONS.get(feature_name, feature_name.title() + " parameters")
+    cons.print(f"[bold]{desc}[/bold] ({len(matches)} params, {len(collapsed)} unique patterns)")
+    cons.print()
+
+    # Show collapsed results
+    _show_collapsed_results(collapsed[:limit], describe)
+
+    if len(collapsed) > limit:
+        cons.print()
+        cons.print(f"  [dim]... 
{len(collapsed) - limit} more patterns (use -n {len(collapsed)} to show all)[/dim]")
+
+
+def _show_families(registry, limit):
+    """Show parameter families grouped by prefix."""
+    families = {}
+    for name in registry.all_params.keys():
+        if "%" in name:
+            # Get prefix before %, then strip any index: patch_icpp(1)%x -> patch_icpp
+            prefix = name.split("%")[0]
+            if "(" in prefix:
+                prefix = prefix.split("(")[0]
+        elif "(" in name:
+            # Simple indexed: chem_wrt_Y(0) -> chem_wrt_Y
+            prefix = name.split("(")[0]
+        else:
+            continue  # Skip simple params
+        families[prefix] = families.get(prefix, 0) + 1
+
+    sorted_families = sorted(families.items(), key=lambda x: -x[1])
+
+    cons.print("[bold]Parameter Families[/bold]")
+    cons.print()
+    cons.print(f"  {'Family':<40} {'Count':>6}")
+    cons.print(f"  {'-'*40} {'-'*6}")
+
+    for prefix, count in sorted_families[:limit]:
+        cons.print(f"  {prefix:<40} {count:>6}")
+
+    if len(sorted_families) > limit:
+        cons.print(f"  ... and {len(sorted_families) - limit} more families")
+
+    cons.print()
+    cons.print("[yellow]Tip:[/yellow] Use './mfc.sh params <family>' to see parameters in a family")
+
+
+def _search_params(registry, query, type_filter, limit, describe=False, search_descriptions=True):  # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-locals
+    """Search for parameters matching a query."""
+    from .params.descriptions import get_description
+
+    query_lower = query.lower()
+    matches = []
+    desc_matches = set()  # Track which params matched via description
+
+    for name, param in registry.all_params.items():
+        name_match = query_lower in name.lower()
+        desc_match = False
+
+        if search_descriptions and not name_match:
+            # Also search in description
+            desc = get_description(name)
+            if desc and query_lower in desc.lower():
+                desc_match = True
+                desc_matches.add(name)
+
+        if not name_match and not desc_match:
+            continue
+
+        # Apply type filter
+        if type_filter and param.param_type.value != type_filter:
+            continue
+
+        matches.append((name, param))
+
+    if not matches:
+        cons.print(f"[yellow]No parameters found matching '{query}'[/yellow]")
+        _suggest_alternatives(registry, query)
+        return
+
+    # Collapse indexed parameters
+    collapsed = _collapse_indexed_params(matches)
+
+    cons.print(f"[bold]Parameters matching '{query}'[/bold] ({len(matches)} params, {len(collapsed)} unique patterns)")
+    cons.print()
+
+    # Show collapsed results (enable describe mode if we matched via description)
+    show_describe = describe or (search_descriptions and len(desc_matches) > 0)
+    _show_collapsed_results(collapsed[:limit], show_describe)
+
+    if len(collapsed) > limit:
+        cons.print()
+        cons.print(f"  [dim]... 
{len(collapsed) - limit} more patterns (use -n {len(collapsed)} to show all)[/dim]") + + +def _show_collapsed_results(collapsed, describe=False): # pylint: disable=too-many-branches + """Show collapsed search results.""" + from .params.descriptions import get_description, get_pattern_description + + # Check if any items have index ranges to show + has_ranges = any(len(item) == 4 and item[2] > 1 for item in collapsed) + + if describe: + # Description mode: one param per block with description + for item in collapsed: + name = item[0] + param = item[1] + count = item[2] + range_str = item[3] if len(item) == 4 else "" + + # Get description - use pattern description for indexed params + if "(N)" in name or "(M)" in name: + desc = get_pattern_description(name) + else: + desc = get_description(name) + + cons.print(f" [cyan]{name}[/cyan]") + cons.print(f" Type: {param.param_type.name}") + if count > 1: + cons.print(f" Count: {count} ({range_str})") + if desc: + cons.print(f" [dim]{desc}[/dim]") + cons.print() + else: + # Compact table mode + if has_ranges: + cons.print(f" {'Parameter':<40} {'Type':12} {'#':>4} {'Index Range'}") + cons.print(f" {'-'*40} {'-'*12} {'-'*4} {'-'*15}") + else: + cons.print(f" {'Parameter':<40} {'Type':12}") + cons.print(f" {'-'*40} {'-'*12}") + + for item in collapsed: + if len(item) == 4: + name, param, count, range_str = item + if count > 1: + cons.print(f" {name:<40} {param.param_type.name:12} {count:>4} {range_str}") + else: + if has_ranges: + cons.print(f" {name:<40} {param.param_type.name:12} {count:>4}") + else: + cons.print(f" {name:<40} {param.param_type.name:12}") + else: + name, param, count = item + if has_ranges: + cons.print(f" {name:<40} {param.param_type.name:12} {count:>4}") + else: + cons.print(f" {name:<40} {param.param_type.name:12}") + + +def _suggest_alternatives(registry, query): + """Suggest similar parameter names.""" + import difflib + all_names = list(registry.all_params.keys()) + suggestions = difflib.get_close_matches(query, all_names, n=5, cutoff=0.5) + + if suggestions: + cons.print() + cons.print("[yellow]Did you mean:[/yellow]") + for s in suggestions: + cons.print(f" {s}") diff --git a/toolchain/mfc/params_tests/.gitignore b/toolchain/mfc/params_tests/.gitignore new file mode 100644 index 0000000000..588fff811a --- /dev/null +++ b/toolchain/mfc/params_tests/.gitignore @@ -0,0 +1,5 @@ +# Generated data files - recreate with: python -m mfc.params_tests.runner build +data/ + +# Python cache +__pycache__/ diff --git a/toolchain/mfc/params_tests/__init__.py b/toolchain/mfc/params_tests/__init__.py new file mode 100644 index 0000000000..3e6982f8ee --- /dev/null +++ b/toolchain/mfc/params_tests/__init__.py @@ -0,0 +1,8 @@ +""" +Parameter Validation Test Infrastructure. + +This package provides tools for: +- Exporting parameter inventory +- Capturing validation snapshots +- Comparing validation behavior across refactoring +""" diff --git a/toolchain/mfc/params_tests/coverage.py b/toolchain/mfc/params_tests/coverage.py new file mode 100644 index 0000000000..5d80fbafc6 --- /dev/null +++ b/toolchain/mfc/params_tests/coverage.py @@ -0,0 +1,296 @@ +""" +Constraint Coverage Analysis Tool. + +Analyzes which validation constraints are exercised by the test cases. +This helps identify gaps in test coverage before refactoring. +""" + +import ast +import json +from pathlib import Path +from typing import Dict, List, Any +from dataclasses import dataclass + + +@dataclass +class ConstraintInfo: + """ + Information about a single constraint. 
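+
+    Example (illustrative values):
+
+        ConstraintInfo(method="check_weno_simulation", line_number=210,
+                       message="weno_order must be 1, 3, 5, or 7",
+                       condition_code="weno_order not in [1, 3, 5, 7]")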
+ + Attributes: + method: Name of the check_* method containing this constraint. + line_number: Line number of the prohibit() call start (1-indexed). + For multi-line calls, this is the first line. + message: Error message shown when constraint is violated. + condition_code: Unparsed source code of the condition expression. + """ + method: str + line_number: int + message: str + condition_code: str + + +def _extract_message(msg_node: ast.expr) -> str: + """Extract message string from AST node.""" + if isinstance(msg_node, ast.Constant): + return msg_node.value + if isinstance(msg_node, ast.JoinedStr): + # f-string - extract the static parts + return "".join( + p.value if isinstance(p, ast.Constant) else "{...}" + for p in msg_node.values + ) + return "" + + +def _is_prohibit_call(node: ast.AST) -> bool: + """Check if node is a self.prohibit() call with enough arguments.""" + if not isinstance(node, ast.Call): + return False + if not isinstance(node.func, ast.Attribute): + return False + return node.func.attr == 'prohibit' and len(node.args) >= 2 + + +def _find_case_validator_class(tree: ast.Module) -> ast.ClassDef: + """Find the CaseValidator class in the AST.""" + for node in tree.body: + if isinstance(node, ast.ClassDef) and node.name == "CaseValidator": + return node + return None + + +def extract_constraints_from_validator() -> List[ConstraintInfo]: + """Parse case_validator.py and extract all prohibit() calls.""" + validator_path = Path(__file__).parent.parent / "case_validator.py" + + with open(validator_path, 'r', encoding='utf-8') as f: + source = f.read() + + tree = ast.parse(source) + validator_class = _find_case_validator_class(tree) + if validator_class is None: + return [] + + constraints: List[ConstraintInfo] = [] + + # Iterate through each method in the class + for item in validator_class.body: + if not isinstance(item, ast.FunctionDef): + continue + + # Walk only within this method to find prohibit() calls + for node in ast.walk(item): + if not _is_prohibit_call(node): + continue + + message = _extract_message(node.args[1]) + try: + condition_code = ast.unparse(node.args[0]) + except (ValueError, TypeError, AttributeError): + # ast.unparse can fail on malformed AST or missing attributes + condition_code = "" + + # Note: node.lineno points to the start of the prohibit() call. + # For multi-line calls, this is the first line, not where the + # condition or message appears. 
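+            # e.g. a prohibit() call spanning lines 210-214 would be
+            # recorded with line_number == 210.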
+ constraints.append(ConstraintInfo( + method=item.name, + line_number=node.lineno, + message=message, + condition_code=condition_code + )) + + return constraints + + +def _count_prohibit_calls(func_node: ast.FunctionDef) -> int: + """Count self.prohibit() calls in a function.""" + count = 0 + for subnode in ast.walk(func_node): + if _is_prohibit_call(subnode): + count += 1 + return count + + +def extract_check_methods() -> Dict[str, Dict[str, Any]]: + """Extract all check_* methods from validator with their stage.""" + validator_path = Path(__file__).parent.parent / "case_validator.py" + + with open(validator_path, 'r', encoding='utf-8') as f: + source = f.read() + + methods = {} + tree = ast.parse(source) + validator_class = _find_case_validator_class(tree) + + if validator_class is None: + return methods + + for item in validator_class.body: + if not isinstance(item, ast.FunctionDef): + continue + if not item.name.startswith("check_"): + continue + + docstring = ast.get_docstring(item) or "" + methods[item.name] = { + "line_number": item.lineno, + "docstring": docstring.split('\n')[0] if docstring else "", + "prohibit_count": _count_prohibit_calls(item), + } + + return methods + + +_VALIDATE_METHOD_TO_STAGE = { + "validate_common": "common", + "validate_pre_process": "pre_process", + "validate_simulation": "simulation", + "validate_post_process": "post_process", +} + + +def _find_check_calls(func_node: ast.FunctionDef) -> List[str]: + """Find all self.check_* method calls in a function.""" + calls = [] + for subnode in ast.walk(func_node): + if not isinstance(subnode, ast.Call): + continue + if not isinstance(subnode.func, ast.Attribute): + continue + if subnode.func.attr.startswith("check_"): + calls.append(subnode.func.attr) + return calls + + +def extract_validate_dispatch() -> Dict[str, List[str]]: + """Extract which check methods are called for each stage.""" + validator_path = Path(__file__).parent.parent / "case_validator.py" + + with open(validator_path, 'r', encoding='utf-8') as f: + source = f.read() + + dispatch = {stage: [] for stage in _VALIDATE_METHOD_TO_STAGE.values()} + tree = ast.parse(source) + validator_class = _find_case_validator_class(tree) + + if validator_class is None: + return dispatch + + for item in validator_class.body: + if not isinstance(item, ast.FunctionDef): + continue + stage = _VALIDATE_METHOD_TO_STAGE.get(item.name) + if stage is None: + continue + dispatch[stage].extend(_find_check_calls(item)) + + return dispatch + + +def generate_coverage_report() -> Dict[str, Any]: + """Generate a comprehensive coverage report.""" + constraints = extract_constraints_from_validator() + methods = extract_check_methods() + dispatch = extract_validate_dispatch() + + # Group constraints by method + by_method = {} + for c in constraints: + if c.method not in by_method: + by_method[c.method] = [] + by_method[c.method].append({ + "line": c.line_number, + "message": c.message, + "condition": c.condition_code[:80] + "..." 
if len(c.condition_code) > 80 else c.condition_code + }) + + # Calculate coverage per stage + stage_coverage = {} + for stage, check_methods in dispatch.items(): + total_constraints = 0 + for method_name in check_methods: + if method_name in methods: + total_constraints += methods[method_name]["prohibit_count"] + stage_coverage[stage] = { + "methods": check_methods, + "method_count": len(check_methods), + "constraint_count": total_constraints, + } + + # Add common constraints to all stages + common_constraints = stage_coverage.get("common", {}).get("constraint_count", 0) + for stage in ["pre_process", "simulation", "post_process"]: + if stage in stage_coverage: + stage_coverage[stage]["total_with_common"] = ( + stage_coverage[stage]["constraint_count"] + common_constraints + ) + + return { + "summary": { + "total_constraints": len(constraints), + "total_check_methods": len(methods), + "methods_with_most_constraints": sorted( + [(name, info["prohibit_count"]) for name, info in methods.items()], + key=lambda x: -x[1] + )[:10], + }, + "stage_coverage": stage_coverage, + "methods": methods, + "constraints_by_method": by_method, + } + + +def print_coverage_report(): + """Print coverage report to console.""" + report = generate_coverage_report() + + print("=" * 70) + print("MFC Case Validator Constraint Coverage Report") + print("=" * 70) + + print(f"\nTotal constraints (self.prohibit calls): {report['summary']['total_constraints']}") + print(f"Total check methods: {report['summary']['total_check_methods']}") + + print("\nMethods with most constraints:") + for method, count in report['summary']['methods_with_most_constraints']: + print(f" {method}: {count} constraints") + + print("\nConstraints by stage:") + for stage, info in report['stage_coverage'].items(): + total = info.get('total_with_common', info['constraint_count']) + print(f" {stage}:") + print(f" Methods: {info['method_count']}") + print(f" Constraints: {info['constraint_count']} (+ common = {total})") + + print("\n" + "=" * 70) + print("Detailed constraint listing (top methods):") + print("=" * 70) + + for method, count in report['summary']['methods_with_most_constraints'][:5]: + print(f"\n{method} ({count} constraints):") + if method in report['constraints_by_method']: + for c in report['constraints_by_method'][method][:5]: + print(f" L{c['line']}: {c['message'][:60]}") + if len(report['constraints_by_method'][method]) > 5: + print(f" ... and {len(report['constraints_by_method'][method]) - 5} more") + + +def save_coverage_report(output_path: Path = None): + """Save coverage report to JSON file.""" + if output_path is None: + output_path = Path(__file__).parent / "constraint_coverage.json" + + report = generate_coverage_report() + + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(report, f, indent=2) + + return output_path + + +if __name__ == "__main__": + print_coverage_report() + path = save_coverage_report() + print(f"\nReport saved to: {path}") diff --git a/toolchain/mfc/params_tests/inventory.py b/toolchain/mfc/params_tests/inventory.py new file mode 100644 index 0000000000..96a88d124d --- /dev/null +++ b/toolchain/mfc/params_tests/inventory.py @@ -0,0 +1,151 @@ +""" +Parameter Inventory Export Tool. + +Exports all MFC parameters with their types and tags to JSON for analysis. 
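+
+Run it directly (for example from the toolchain/ directory) to print a
+summary and write param_inventory.json next to this file:
+
+    python3 -m mfc.params_tests.inventory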
+""" + +import re +import json +from pathlib import Path +from typing import Dict, Any + +from ..run.case_dicts import ALL +from ..params import REGISTRY +from ..params.schema import ParamType + + +def get_param_type_name(param_type) -> str: + """Convert ParamType to string name.""" + if isinstance(param_type, ParamType): + return param_type.name + return "UNKNOWN" + + +def export_parameter_inventory() -> Dict[str, Any]: + """Export complete parameter inventory with metadata.""" + # Count by type + by_type = { + "INT": [], + "REAL": [], + "LOG": [], + "STR": [], + "ANALYTIC_INT": [], + "ANALYTIC_REAL": [], + } + + # Count by tag + by_tag = {} + for tag in REGISTRY.get_all_tags(): + by_tag[tag] = [] + + inventory = { + "metadata": { + "total_parameters": len(ALL), + }, + "parameters": {}, + "by_type": by_type, + "by_tag": by_tag, + } + + for param_name, param_type in sorted(ALL.items()): + type_name = get_param_type_name(param_type) + param = REGISTRY.all_params.get(param_name) + + param_info = { + "type": type_name, + "tags": sorted(param.tags) if param else [], + } + + # Detect pattern-based parameters + if "(" in param_name: + # Extract pattern (e.g., "patch_icpp(1)%x_centroid" -> "patch_icpp({id})%x_centroid") + param_pattern = re.sub(r'\((\d+)\)', r'({id})', param_name) + param_pattern = re.sub(r'\((\d+),\s*(\d+)\)', r'({id1}, {id2})', param_pattern) + param_info["pattern"] = param_pattern + + inventory["parameters"][param_name] = param_info + + # Categorize by type + if type_name in by_type: + by_type[type_name].append(param_name) + + # Categorize by tag + if param: + for tag in param.tags: + if tag in by_tag: + by_tag[tag].append(param_name) + + return inventory + + +def export_parameter_patterns() -> Dict[str, Any]: + """Extract unique parameter patterns (for dynamic parameters).""" + patterns = {} + for param_name, param_type in ALL.items(): + if "(" not in param_name: + continue + + # Normalize the pattern + normalized = re.sub(r'\((\d+)\)', r'({N})', param_name) + normalized = re.sub(r'\((\d+),\s*(\d+)\)', r'({N}, {M})', normalized) + + if normalized not in patterns: + patterns[normalized] = { + "examples": [], + "type": get_param_type_name(param_type), + "count": 0 + } + patterns[normalized]["examples"].append(param_name) + patterns[normalized]["count"] += 1 + + # Trim examples to max 3 + for pattern_data in patterns.values(): + pattern_data["examples"] = pattern_data["examples"][:3] + + return patterns + + +def save_inventory(output_path: Path = None): + """Save parameter inventory to JSON file.""" + if output_path is None: + output_path = Path(__file__).parent / "param_inventory.json" + + inventory = export_parameter_inventory() + inventory["patterns"] = export_parameter_patterns() + + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(inventory, f, indent=2) + + return output_path + + +def print_inventory_summary(): + """Print a summary of the parameter inventory.""" + inventory = export_parameter_inventory() + patterns = export_parameter_patterns() + + print("=" * 60) + print("MFC Parameter Inventory Summary") + print("=" * 60) + print(f"Total parameters: {inventory['metadata']['total_parameters']}") + print() + print("By type:") + for type_name, params in inventory["by_type"].items(): + print(f" - {type_name}: {len(params)}") + print() + print("By feature tag:") + for tag, params in sorted(inventory["by_tag"].items()): + if params: + print(f" - {tag}: {len(params)}") + print() + print(f"Dynamic parameter patterns: {len(patterns)}") + print("Top patterns:") + 
sorted_patterns = sorted(patterns.items(), key=lambda x: -x[1]["count"])[:10] + for pattern, info in sorted_patterns: + print(f" - {pattern}: {info['count']} instances ({info['type']})") + + +if __name__ == "__main__": + print_inventory_summary() + path = save_inventory() + print(f"\nInventory saved to: {path}") diff --git a/toolchain/mfc/params_tests/mutation_tests.py b/toolchain/mfc/params_tests/mutation_tests.py new file mode 100644 index 0000000000..15bbfa0885 --- /dev/null +++ b/toolchain/mfc/params_tests/mutation_tests.py @@ -0,0 +1,279 @@ +""" +Mutation Testing for Validator Coverage. + +Takes valid example cases and systematically mutates parameters +to verify the validator catches invalid configurations. +""" + +import json +import subprocess +from pathlib import Path +from typing import Dict, Any, List, Tuple +from dataclasses import dataclass + +from ..case_validator import CaseValidator + + +@dataclass +class MutationResult: + """Result of a mutation test.""" + case_name: str + param_name: str + original_value: Any + mutated_value: Any + validator_caught: bool + errors: List[str] + + +# Mutations to apply to parameters +MUTATIONS = { + # === BASIC NUMERIC PARAMETERS === + "m": [0, -1, None], + "n": [-1, -10], + "p": [-1, -5], + "dt": [0, -1e-6, None], + "t_step_start": [-1], + "t_step_stop": [-1], + "t_step_save": [0, -1], + "num_fluids": [0, -1], + "num_patches": [0, -1], + "model_eqns": [0, 5, 10, -1], + "weno_order": [0, 2, 4, 6, 8], + "time_stepper": [0, 6, -1], + "riemann_solver": [0, 10, -1], + + # === BOOLEAN PARAMETERS (Fortran logicals) === + "bubbles_euler": ["X", "yes", "1"], + "mpp_lim": ["X", "yes"], + "cyl_coord": ["X", "maybe"], + + # === BOUNDARY CONDITIONS === + "bc_x%beg": [None, 100, -100], + "bc_x%end": [None, 100, -100], + "bc_y%beg": [100, -100], + "bc_y%end": [100, -100], + + # === DOMAIN PARAMETERS === + "x_domain%beg": [None], + "x_domain%end": [None], + + # === PHYSICS: THERMODYNAMICS === + # gamma must be > 1 for physical gases (gamma = Cp/Cv) + # In MFC, fluid_pp(i)%gamma stores 1/(gamma-1), so it must be > 0 + "fluid_pp(1)%gamma": [0, -1, -0.5], + + # pi_inf (stiffness) must be >= 0 for stiffened gas EOS + "fluid_pp(1)%pi_inf": [-1, -1e6], + + # === PHYSICS: PATCH INITIAL CONDITIONS === + # Pressure must be positive + "patch_icpp(1)%pres": [0, -1, -1e5], + + # Density (alpha_rho) must be non-negative (0 allowed for vacuum) + "patch_icpp(1)%alpha_rho(1)": [-1, -1000], + + # Volume fraction must be in [0, 1] + "patch_icpp(1)%alpha(1)": [-0.1, 1.5, 2.0], + + # === PHYSICS: GEOMETRY === + # Patch dimensions must be positive + "patch_icpp(1)%length_x": [0, -1, -10], + "patch_icpp(1)%length_y": [0, -1], + "patch_icpp(1)%length_z": [0, -1], + "patch_icpp(1)%radius": [0, -1], + + # === PHYSICS: BUBBLES === + # Bubble radius must be positive + "patch_icpp(1)%r0": [0, -1], + # Number of bubble bins must be positive + "nb": [0, -1], + # Bubble reference parameters must be positive + "bub_pp%R0ref": [0, -1], + "bub_pp%p0ref": [0, -1], + "bub_pp%rho0ref": [0, -1], + "bub_pp%T0ref": [0, -1], + # Bubble viscosities must be non-negative + "bub_pp%mu_l": [-1, -1e-3], + "bub_pp%mu_g": [-1, -1e-3], + # Surface tension must be non-negative + "bub_pp%ss": [-1, -0.01], + # Global bubble reference values + "rhoref": [0, -1, -1000], + "pref": [0, -1, -1e5], + + # === PHYSICS: ACOUSTICS === + # Frequency/wavelength must be positive + "acoustic(1)%frequency": [0, -1], + "acoustic(1)%wavelength": [0, -1], + "acoustic(1)%gauss_sigma_time": [0, -1], + 
"acoustic(1)%gauss_sigma_dist": [0, -1], + + # === NUMERICS === + # CFL target should be in (0, 1] + "cfl_target": [-0.1, 0, 1.5, 2.0], + + # WENO epsilon must be positive (small regularization) + "weno_eps": [0, -1e-6], +} + + +def load_example_case(case_path: Path) -> Dict[str, Any]: + """Load parameters from an example case file.""" + result = subprocess.run( + ["python3", str(case_path)], + capture_output=True, + text=True, + cwd=case_path.parent, + timeout=30, + check=False + ) + if result.returncode != 0: + return None + return json.loads(result.stdout.strip()) + + +def run_mutation(params: Dict[str, Any], param_name: str, + mutated_value: Any) -> Tuple[bool, List[str]]: + """Apply mutation and check if validator catches it.""" + mutated_params = params.copy() + + if mutated_value is None: + # Remove the parameter + mutated_params.pop(param_name, None) + else: + mutated_params[param_name] = mutated_value + + validator = CaseValidator(mutated_params) + + try: + validator.validate_pre_process() + except Exception: + pass + + try: + validator.validate_simulation() + except Exception: + pass + + try: + validator.validate_post_process() + except Exception: + pass + + return len(validator.errors) > 0, validator.errors + + +def run_mutations_on_case(case_name: str, params: Dict[str, Any]) -> List[MutationResult]: + """Run all applicable mutations on a case.""" + results = [] + + for param_name, mutations in MUTATIONS.items(): + if param_name not in params: + continue + + original = params[param_name] + + for mutated_value in mutations: + # Skip if mutation is same as original + if mutated_value == original: + continue + + caught, errors = run_mutation(params, param_name, mutated_value) + + results.append(MutationResult( + case_name=case_name, + param_name=param_name, + original_value=original, + mutated_value=mutated_value, + validator_caught=caught, + errors=errors[:3] # Limit for memory + )) + + return results + + +def run_mutation_tests(max_cases: int = 10) -> Dict[str, Any]: + """Run mutation tests on example cases.""" + examples_dir = Path(__file__).parent.parent.parent.parent / "examples" + case_files = sorted(examples_dir.glob("**/case.py"))[:max_cases] + + all_results = [] + cases_tested = 0 + + for case_file in case_files: + case_name = str(case_file.relative_to(examples_dir).parent) + params = load_example_case(case_file) + + if params is None: + continue + + cases_tested += 1 + results = run_mutations_on_case(case_name, params) + all_results.extend(results) + + # Summarize + total = len(all_results) + caught = sum(1 for r in all_results if r.validator_caught) + missed = sum(1 for r in all_results if not r.validator_caught) + + # Group by parameter + by_param = {} + for r in all_results: + if r.param_name not in by_param: + by_param[r.param_name] = {"caught": 0, "missed": 0} + if r.validator_caught: + by_param[r.param_name]["caught"] += 1 + else: + by_param[r.param_name]["missed"] += 1 + + return { + "cases_tested": cases_tested, + "total_mutations": total, + "caught": caught, + "missed": missed, + "catch_rate": caught / total * 100 if total > 0 else 0, + "by_param": by_param, + "missed_details": [r for r in all_results if not r.validator_caught][:20], + } + + +def print_mutation_report(): + """Print mutation test results.""" + print("Running mutation tests on example cases...") + print("(This tests that the validator catches invalid parameter values)") + print() + + results = run_mutation_tests(max_cases=20) + + print("=" * 70) + print("MUTATION TEST RESULTS") + print("=" * 70) + 
print(f"\nCases tested: {results['cases_tested']}") + print(f"Total mutations: {results['total_mutations']}") + print(f"Caught by validator: {results['caught']}") + print(f"Missed by validator: {results['missed']}") + print(f"Catch rate: {results['catch_rate']:.1f}%") + + print("\n" + "-" * 70) + print("BY PARAMETER:") + print("-" * 70) + for param, data in sorted(results["by_param"].items(), + key=lambda x: -x[1]["missed"]): + total = data["caught"] + data["missed"] + rate = data["caught"] / total * 100 if total > 0 else 0 + status = "OK" if data["missed"] == 0 else "GAPS" + print(f" {param}: {data['caught']}/{total} caught ({rate:.0f}%) [{status}]") + + if results["missed_details"]: + print("\n" + "-" * 70) + print("SAMPLE OF UNCAUGHT MUTATIONS (potential validator gaps):") + print("-" * 70) + for r in results["missed_details"][:10]: + print(f" {r.case_name}") + print(f" {r.param_name}: {r.original_value} -> {r.mutated_value}") + print(f" No validation error raised!") + print() + + +if __name__ == "__main__": + print_mutation_report() diff --git a/toolchain/mfc/params_tests/negative_tests.py b/toolchain/mfc/params_tests/negative_tests.py new file mode 100644 index 0000000000..683c3f41c3 --- /dev/null +++ b/toolchain/mfc/params_tests/negative_tests.py @@ -0,0 +1,358 @@ +""" +Negative Test Case Generator. + +Generates test cases that intentionally violate validator constraints +to ensure each constraint is properly enforced. +""" + +from typing import Dict, Any, List +from dataclasses import dataclass + +from ..case_validator import CaseValidator + + +@dataclass +class ConstraintTest: + """A test case for a specific constraint.""" + method: str + line_number: int + message: str + condition: str + test_params: Dict[str, Any] + should_trigger: bool = True + + +# Base valid case - starts from a known-good configuration +BASE_CASE = { + "m": 50, + "n": 0, + "p": 0, + "model_eqns": 2, + "num_fluids": 1, + "num_patches": 1, + "t_step_start": 0, + "t_step_stop": 100, + "t_step_save": 10, + "dt": 1e-6, + "weno_order": 5, + "bc_x%beg": -1, + "bc_x%end": -1, + "x_domain%beg": 0.0, + "x_domain%end": 1.0, + "patch_icpp(1)%geometry": 1, + "patch_icpp(1)%x_centroid": 0.5, + "patch_icpp(1)%length_x": 1.0, + "patch_icpp(1)%vel(1)": 0.0, + "patch_icpp(1)%pres": 1.0, + "patch_icpp(1)%alpha_rho(1)": 1.0, + "patch_icpp(1)%alpha(1)": 1.0, + "fluid_pp(1)%gamma": 0.4, + "fluid_pp(1)%pi_inf": 0.0, +} + + +def generate_constraint_tests() -> List[ConstraintTest]: + """Generate test cases for each constraint in case_validator.py.""" + tests = [] + + # =================================================================== + # check_simulation_domain constraints + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_simulation_domain", + line_number=56, + message="m must be set", + condition="m is None", + test_params={**BASE_CASE, "m": None}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=57, + message="m must be positive", + condition="m <= 0", + test_params={**BASE_CASE, "m": 0}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=57, + message="m must be positive", + condition="m <= 0", + test_params={**BASE_CASE, "m": -5}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=58, + message="n must be non-negative", + condition="n < 0", + test_params={**BASE_CASE, "n": -1}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=59, + message="p must be non-negative", 
+ condition="p < 0", + test_params={**BASE_CASE, "p": -1}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=60, + message="p must be odd for cylindrical coordinates", + condition="cyl_coord and p > 0 and p % 2 == 0", + test_params={**BASE_CASE, "cyl_coord": "T", "n": 10, "p": 2}, + ), + ConstraintTest( + method="check_simulation_domain", + line_number=62, + message="p must be 0 if n = 0", + condition="n == 0 and p > 0", + test_params={**BASE_CASE, "n": 0, "p": 5}, + ), + ]) + + # =================================================================== + # check_model_eqns_and_num_fluids constraints + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_model_eqns_and_num_fluids", + line_number=73, + message="model_eqns must be 1, 2, 3, or 4", + condition="model_eqns not in [1, 2, 3, 4]", + test_params={**BASE_CASE, "model_eqns": 5}, + ), + ConstraintTest( + method="check_model_eqns_and_num_fluids", + line_number=75, + message="num_fluids must be positive", + condition="num_fluids < 1", + test_params={**BASE_CASE, "num_fluids": 0}, + ), + ConstraintTest( + method="check_model_eqns_and_num_fluids", + line_number=85, + message="model_eqns = 1 does not support mpp_lim", + condition="model_eqns == 1 and mpp_lim", + test_params={**BASE_CASE, "model_eqns": 1, "num_fluids": None, "mpp_lim": "T"}, + ), + ConstraintTest( + method="check_model_eqns_and_num_fluids", + line_number=87, + message="num_fluids = 1 does not support mpp_lim", + condition="num_fluids == 1 and mpp_lim", + test_params={**BASE_CASE, "num_fluids": 1, "mpp_lim": "T"}, + ), + ]) + + # =================================================================== + # check_time_stepping constraints + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_time_stepping", + line_number=0, # Will be determined + message="dt must be positive", + condition="dt <= 0", + test_params={**BASE_CASE, "dt": 0}, + ), + ConstraintTest( + method="check_time_stepping", + line_number=0, + message="dt must be positive", + condition="dt <= 0", + test_params={**BASE_CASE, "dt": -1e-6}, + ), + ConstraintTest( + method="check_time_stepping", + line_number=0, + message="t_step_stop must be >= t_step_start", + condition="t_step_stop < t_step_start", + test_params={**BASE_CASE, "t_step_start": 100, "t_step_stop": 50}, + ), + ]) + + # =================================================================== + # check_weno constraints + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_weno_simulation", + line_number=0, + message="weno_order must be 1, 3, 5, or 7", + condition="weno_order not in [1, 3, 5, 7]", + test_params={**BASE_CASE, "weno_order": 4}, + ), + ConstraintTest( + method="check_weno_simulation", + line_number=0, + message="weno_order must be 1, 3, 5, or 7", + condition="weno_order not in [1, 3, 5, 7]", + test_params={**BASE_CASE, "weno_order": 9}, + ), + ]) + + # =================================================================== + # check_boundary_conditions constraints + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_boundary_conditions", + line_number=0, + message="bc_x%beg must be set", + condition="bc_x%beg is None", + test_params={**BASE_CASE, "bc_x%beg": None}, + ), + ConstraintTest( + method="check_boundary_conditions", + line_number=0, + message="bc_x%end 
must be set", + condition="bc_x%end is None", + test_params={**BASE_CASE, "bc_x%end": None}, + ), + ]) + + # =================================================================== + # check_bubbles constraints + # =================================================================== + bubble_case = {**BASE_CASE, "bubbles_euler": "T", "bubble_model": 2, "nb": 1} + tests.extend([ + ConstraintTest( + method="check_bubbles_euler", + line_number=0, + message="nb must be >= 1", + condition="bubbles_euler and nb < 1", + test_params={**bubble_case, "nb": 0}, + ), + ]) + + # =================================================================== + # check_acoustic_source constraints (the biggest method) + # =================================================================== + tests.extend([ + ConstraintTest( + method="check_acoustic_source", + line_number=0, + message="num_source must be positive when acoustic_source is enabled", + condition="acoustic_source and num_source < 1", + test_params={**BASE_CASE, "acoustic_source": "T", "num_source": 0}, + ), + ]) + + return tests + + +def _message_matches(expected: str, actual_errors: List[str]) -> bool: + """Check if expected message matches any actual error (fuzzy).""" + expected_lower = expected.lower() + # Extract key terms from expected message + key_terms = [w for w in expected_lower.split() if len(w) > 3] + + for err in actual_errors: + err_lower = err.lower() + # Check if most key terms appear in the error + matches = sum(1 for term in key_terms if term in err_lower) + if matches >= len(key_terms) * 0.5: # 50% of terms match + return True + return False + + +def run_constraint_tests() -> Dict[str, Any]: + """Run all constraint tests and return results.""" + tests = generate_constraint_tests() + results = { + "total": len(tests), + "passed": 0, + "failed": 0, + "errors_triggered": 0, + "details": [], + } + + for test in tests: + validator = CaseValidator(test.test_params) + + # Run validation for all stages + try: + validator.validate_pre_process() + except Exception: + pass + + try: + validator.validate_simulation() + except Exception: + pass + + # Check if any error was triggered (the key metric) + any_error = len(validator.errors) > 0 + message_matched = _message_matches(test.message, validator.errors) + + if any_error: + results["errors_triggered"] += 1 + + if any_error == test.should_trigger: + results["passed"] += 1 + status = "PASS" + else: + results["failed"] += 1 + status = "FAIL" + + results["details"].append({ + "method": test.method, + "message": test.message, + "status": status, + "expected_trigger": test.should_trigger, + "any_error": any_error, + "message_matched": message_matched, + "all_errors": validator.errors[:3], # Limit for display + }) + + return results + + +def print_test_report(): + """Print test results to console.""" + results = run_constraint_tests() + + print("=" * 70) + print("Constraint Validation Negative Tests") + print("=" * 70) + print(f"\nTotal tests: {results['total']}") + print(f"Errors triggered: {results['errors_triggered']}/{results['total']}") + print(f"Passed: {results['passed']}") + print(f"Failed: {results['failed']}") + + # Group by method + by_method = {} + for detail in results["details"]: + method = detail["method"] + if method not in by_method: + by_method[method] = {"passed": 0, "failed": 0, "tests": []} + if detail["status"] == "PASS": + by_method[method]["passed"] += 1 + else: + by_method[method]["failed"] += 1 + by_method[method]["tests"].append(detail) + + print("\nResults by method:") + for method, 
data in sorted(by_method.items()): + status = "OK" if data["failed"] == 0 else "ISSUES" + print(f" {method}: {data['passed']}/{data['passed']+data['failed']} [{status}]") + + if results["failed"] > 0: + print("\nFailed tests (constraint not triggering as expected):") + for detail in results["details"]: + if detail["status"] == "FAIL": + print(f"\n {detail['method']}") + print(f" Expected: {detail['message']}") + print(f" Got errors: {detail['any_error']}") + if detail['all_errors']: + for err in detail['all_errors'][:2]: + print(f" - {err[:60]}...") + + print("\n" + "=" * 70) + error_rate = results["errors_triggered"] / results["total"] * 100 + print(f"Error trigger rate: {error_rate:.1f}% ({results['errors_triggered']}/{results['total']} tests triggered errors)") + print("=" * 70) + + +if __name__ == "__main__": + print_test_report() diff --git a/toolchain/mfc/params_tests/runner.py b/toolchain/mfc/params_tests/runner.py new file mode 100644 index 0000000000..cc980cfbcc --- /dev/null +++ b/toolchain/mfc/params_tests/runner.py @@ -0,0 +1,258 @@ +""" +Test Safety Net Runner. + +Main entry point for building and verifying the parameter validation test suite. +""" +# pylint: disable=import-outside-toplevel + +import sys +import json +import argparse +from pathlib import Path + +from .inventory import ( + export_parameter_inventory, + save_inventory, + print_inventory_summary +) +from .snapshot import ( + capture_all_examples, + save_snapshots, + load_snapshots, + compare_snapshots, + print_comparison_report +) +from .coverage import ( + generate_coverage_report, + print_coverage_report, + save_coverage_report +) + + +def get_data_dir() -> Path: + """Get the directory for storing test data.""" + data_dir = Path(__file__).parent / "data" + data_dir.mkdir(exist_ok=True) + return data_dir + + +def build_safety_net(verbose: bool = True): + """ + Build the complete test safety net. + + This captures: + 1. Parameter inventory + 2. Validation snapshots from all examples + 3. Constraint coverage analysis + """ + data_dir = get_data_dir() + + if verbose: + print("=" * 70) + print("Building Parameter Validation Safety Net") + print("=" * 70) + + # 1. Parameter inventory + if verbose: + print("\n[1/3] Exporting parameter inventory...") + inventory_path = data_dir / "param_inventory.json" + save_inventory(inventory_path) + inventory = export_parameter_inventory() + if verbose: + print(f" Total parameters: {inventory['metadata']['total_parameters']}") + print(f" Saved to: {inventory_path}") + + # 2. Validation snapshots + if verbose: + print("\n[2/3] Capturing validation snapshots from examples...") + snapshots = capture_all_examples() + snapshots_path = data_dir / "validation_snapshots.json" + save_snapshots(snapshots, snapshots_path) + load_errors = sum(1 for s in snapshots.values() if s.load_error) + if verbose: + print(f" Total cases: {len(snapshots)}") + print(f" Load errors: {load_errors}") + print(f" Saved to: {snapshots_path}") + + # 3. 
Constraint coverage + if verbose: + print("\n[3/3] Analyzing constraint coverage...") + coverage_path = data_dir / "constraint_coverage.json" + save_coverage_report(coverage_path) + coverage = generate_coverage_report() + if verbose: + print(f" Total constraints: {coverage['summary']['total_constraints']}") + print(f" Check methods: {coverage['summary']['total_check_methods']}") + print(f" Saved to: {coverage_path}") + + if verbose: + print("\n" + "=" * 70) + print("Safety net built successfully!") + print("=" * 70) + print(f"\nData stored in: {data_dir}") + print("\nFiles created:") + print(f" - param_inventory.json ({inventory['metadata']['total_parameters']} params)") + print(f" - validation_snapshots.json ({len(snapshots)} cases)") + print(f" - constraint_coverage.json ({coverage['summary']['total_constraints']} constraints)") + + return { + "inventory": inventory, + "snapshots": snapshots, + "coverage": coverage, + } + + +def _print_if(verbose: bool, *args, **kwargs): + """Print only if verbose mode is enabled.""" + if verbose: + print(*args, **kwargs) + + +def _print_changes_report(differences: dict, verbose: bool): + """Print report when validation has changed.""" + if not verbose: + return + print("\n" + "=" * 70) + print("VALIDATION CHANGED!") + print("=" * 70) + if differences['changed_validation']: + print(f" {len(differences['changed_validation'])} cases have different validation results") + if differences['removed_cases']: + print(f" {len(differences['removed_cases'])} cases were removed") + print("\nIf this is expected, run 'build' to update the safety net.") + + +def verify_safety_net(verbose: bool = True) -> bool: + """ + Verify that current validation matches the captured safety net. + + Returns True if validation is unchanged, False if there are differences. + """ + data_dir = get_data_dir() + snapshots_path = data_dir / "validation_snapshots.json" + + if not snapshots_path.exists(): + _print_if(verbose, "ERROR: Safety net not found. 
Run 'build' first.") + return False + + _print_if(verbose, "=" * 70) + _print_if(verbose, "Verifying Parameter Validation Against Safety Net") + _print_if(verbose, "=" * 70) + + _print_if(verbose, "\nLoading saved snapshots...") + old_snapshots = load_snapshots(snapshots_path) + _print_if(verbose, f" Loaded {len(old_snapshots.get('snapshots', {}))} cases") + + _print_if(verbose, "\nCapturing current validation results...") + new_snapshots = capture_all_examples() + _print_if(verbose, f" Captured {len(new_snapshots)} cases") + + _print_if(verbose, "\nComparing results...") + differences = compare_snapshots(old_snapshots, new_snapshots) + + if verbose: + print_comparison_report(differences) + + has_changes = bool(differences['changed_validation'] or differences['removed_cases']) + if has_changes: + _print_changes_report(differences, verbose) + return False + + _print_if(verbose, "\n" + "=" * 70) + _print_if(verbose, "VALIDATION UNCHANGED - All tests pass!") + _print_if(verbose, "=" * 70) + + return True + + +def show_summary(): + """Show summary of captured safety net data.""" + data_dir = get_data_dir() + + print("=" * 70) + print("Parameter Validation Safety Net Summary") + print("=" * 70) + + # Inventory + inventory_path = data_dir / "param_inventory.json" + if inventory_path.exists(): + with open(inventory_path) as f: + inventory = json.load(f) + print("\nParameter Inventory:") + print(f" Total parameters: {inventory['metadata']['total_parameters']}") + print(f" By stage:") + print(f" Common: {inventory['metadata']['common_count']}") + print(f" Pre-process: {inventory['metadata']['pre_process_count']}") + print(f" Simulation: {inventory['metadata']['simulation_count']}") + print(f" Post-process: {inventory['metadata']['post_process_count']}") + else: + print("\nParameter Inventory: NOT FOUND") + + # Snapshots + snapshots_path = data_dir / "validation_snapshots.json" + if snapshots_path.exists(): + with open(snapshots_path) as f: + snapshots = json.load(f) + print("\nValidation Snapshots:") + print(f" Total cases: {snapshots['metadata']['total_cases']}") + print(f" Load errors: {snapshots['metadata']['load_errors']}") + print(f" Validation errors: {snapshots['metadata']['validation_errors']}") + else: + print("\nValidation Snapshots: NOT FOUND") + + # Coverage + coverage_path = data_dir / "constraint_coverage.json" + if coverage_path.exists(): + with open(coverage_path) as f: + coverage = json.load(f) + print("\nConstraint Coverage:") + print(f" Total constraints: {coverage['summary']['total_constraints']}") + print(f" Check methods: {coverage['summary']['total_check_methods']}") + print(" Top methods by constraint count:") + for method, count in coverage['summary']['methods_with_most_constraints'][:5]: + print(f" {method}: {count}") + else: + print("\nConstraint Coverage: NOT FOUND") + + +def main(): + """Main entry point for command-line usage.""" + parser = argparse.ArgumentParser( + description="Parameter Validation Test Safety Net" + ) + parser.add_argument( + "command", + choices=["build", "verify", "summary", "inventory", "coverage", + "negative", "mutation"], + help="Command to run" + ) + parser.add_argument( + "-q", "--quiet", + action="store_true", + help="Reduce output verbosity" + ) + + args = parser.parse_args() + verbose = not args.quiet + + if args.command == "build": + build_safety_net(verbose=verbose) + elif args.command == "verify": + success = verify_safety_net(verbose=verbose) + sys.exit(0 if success else 1) + elif args.command == "summary": + show_summary() + elif 
args.command == "inventory": + print_inventory_summary() + elif args.command == "coverage": + print_coverage_report() + elif args.command == "negative": + from .negative_tests import print_test_report + print_test_report() + elif args.command == "mutation": + from .mutation_tests import print_mutation_report + print_mutation_report() + + +if __name__ == "__main__": + main() diff --git a/toolchain/mfc/params_tests/snapshot.py b/toolchain/mfc/params_tests/snapshot.py new file mode 100644 index 0000000000..c5c54f3112 --- /dev/null +++ b/toolchain/mfc/params_tests/snapshot.py @@ -0,0 +1,305 @@ +""" +Validation Snapshot Tool. + +Captures validation results from case files for regression testing. +This allows us to verify that refactoring doesn't change validation behavior. +""" + +import json +import hashlib +import subprocess +from pathlib import Path +from typing import Dict, Any, List, Optional +from dataclasses import dataclass, asdict + +from ..case_validator import CaseValidator + + +@dataclass +class ValidationResult: + """Result of validating a single case file for a single stage.""" + case_path: str + stage: str + success: bool + errors: List[str] + param_hash: str # Hash of parameters for change detection + error_count: int + + +@dataclass +class CaseSnapshot: + """Complete validation snapshot for a case file.""" + case_path: str + param_count: int + param_hash: str + stages: Dict[str, ValidationResult] + load_error: Optional[str] = None + + +def hash_params(params: Dict[str, Any]) -> str: + """Create a hash of parameters for change detection.""" + # Sort keys for consistent hashing + sorted_items = sorted(params.items(), key=lambda x: x[0]) + param_str = json.dumps(sorted_items, sort_keys=True, default=str) + return hashlib.md5(param_str.encode()).hexdigest()[:12] + + +def validate_case_for_stage(params: Dict[str, Any], stage: str) -> ValidationResult: + """Run validation for a specific stage and capture results.""" + validator = CaseValidator(params) + + try: + if stage == "pre_process": + validator.validate_pre_process() + elif stage == "simulation": + validator.validate_simulation() + elif stage == "post_process": + validator.validate_post_process() + else: + raise ValueError(f"Unknown stage: {stage}") + + return ValidationResult( + case_path="", # Will be filled in by caller + stage=stage, + success=len(validator.errors) == 0, + errors=validator.errors.copy(), + param_hash=hash_params(params), + error_count=len(validator.errors) + ) + except (ValueError, KeyError, TypeError, AttributeError) as e: + # Catch expected validation errors, not programming bugs like SystemExit + return ValidationResult( + case_path="", + stage=stage, + success=False, + errors=[f"Exception during validation: {type(e).__name__}: {str(e)}"], + param_hash=hash_params(params), + error_count=1 + ) + + +def load_case_params(case_path: Path) -> Dict[str, Any]: + """Load parameters from a case file by running it and capturing JSON output.""" + # MFC case files print JSON to stdout when run + result = subprocess.run( + ["python3", str(case_path)], + capture_output=True, + text=True, + cwd=case_path.parent, + timeout=30, + check=False + ) + + if result.returncode != 0: + raise ValueError(f"Case file failed: {result.stderr[:200]}") + + # Parse the JSON output + output = result.stdout.strip() + if not output: + raise ValueError("Case file produced no output") + + return json.loads(output) + + +def capture_case_snapshot(case_path: Path) -> CaseSnapshot: + """Capture complete validation snapshot for a case file.""" + 
case_path = Path(case_path) + + try: + params = load_case_params(case_path) + except (ValueError, json.JSONDecodeError, subprocess.TimeoutExpired, + subprocess.SubprocessError, OSError, FileNotFoundError) as e: + # Catch expected case loading errors, not programming bugs + return CaseSnapshot( + case_path=str(case_path), + param_count=0, + param_hash="", + stages={}, + load_error=f"{type(e).__name__}: {str(e)}" + ) + + stages = {} + for stage in ["pre_process", "simulation", "post_process"]: + result = validate_case_for_stage(params, stage) + result.case_path = str(case_path) + stages[stage] = result + + return CaseSnapshot( + case_path=str(case_path), + param_count=len(params), + param_hash=hash_params(params), + stages=stages + ) + + +def capture_all_examples(examples_dir: Path = None) -> Dict[str, CaseSnapshot]: + """Capture validation snapshots for all example cases.""" + if examples_dir is None: + examples_dir = Path(__file__).parent.parent.parent.parent / "examples" + + snapshots = {} + + # Find all case.py files + case_files = sorted(examples_dir.glob("**/case.py")) + + for case_file in case_files: + relative_path = case_file.relative_to(examples_dir) + case_name = str(relative_path.parent) + + print(f" Capturing: {case_name}...", end=" ", flush=True) + try: + snapshot = capture_case_snapshot(case_file) + snapshots[case_name] = snapshot + + if snapshot.load_error: + print(f"LOAD ERROR: {snapshot.load_error[:50]}") + else: + errors = sum(s.error_count for s in snapshot.stages.values()) + if errors > 0: + print(f"ERRORS: {errors}") + else: + print("OK") + except (ValueError, KeyError, TypeError, OSError, json.JSONDecodeError) as e: + # Catch expected errors during capture, not programming bugs + print(f"EXCEPTION: {e}") + snapshots[case_name] = CaseSnapshot( + case_path=str(case_file), + param_count=0, + param_hash="", + stages={}, + load_error=f"Capture exception: {type(e).__name__}: {str(e)}" + ) + + return snapshots + + +def snapshot_to_dict(snapshot: CaseSnapshot) -> Dict[str, Any]: + """Convert snapshot to JSON-serializable dict.""" + result = asdict(snapshot) + # Convert ValidationResult objects in stages + result["stages"] = { + stage: asdict(vr) for stage, vr in snapshot.stages.items() + } + return result + + +def save_snapshots(snapshots: Dict[str, CaseSnapshot], output_path: Path = None): + """Save snapshots to JSON file.""" + if output_path is None: + output_path = Path(__file__).parent / "validation_snapshots.json" + + data = { + "metadata": { + "total_cases": len(snapshots), + "load_errors": sum(1 for s in snapshots.values() if s.load_error), + "validation_errors": sum( + sum(stage.error_count for stage in s.stages.values()) + for s in snapshots.values() if not s.load_error + ), + }, + "snapshots": { + name: snapshot_to_dict(snapshot) + for name, snapshot in snapshots.items() + } + } + + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=2) + + return output_path + + +def load_snapshots(input_path: Path) -> Dict[str, Any]: + """Load snapshots from JSON file.""" + with open(input_path, 'r', encoding='utf-8') as f: + return json.load(f) + + +def compare_snapshots( + old_snapshots: Dict[str, Any], + new_snapshots: Dict[str, CaseSnapshot] +) -> Dict[str, Any]: + """Compare old and new snapshots, report differences.""" + differences = { + "new_cases": [], + "removed_cases": [], + "changed_validation": [], + "unchanged": [], + } + + old_cases = set(old_snapshots.get("snapshots", {}).keys()) + new_cases = set(new_snapshots.keys()) + + 
differences["new_cases"] = sorted(new_cases - old_cases) + differences["removed_cases"] = sorted(old_cases - new_cases) + + for case_name in sorted(old_cases & new_cases): + old_snap = old_snapshots["snapshots"][case_name] + new_snap = snapshot_to_dict(new_snapshots[case_name]) + + # Compare validation results + changed = False + changes = [] + + for stage in ["pre_process", "simulation", "post_process"]: + old_stage = old_snap.get("stages", {}).get(stage, {}) + new_stage = new_snap.get("stages", {}).get(stage, {}) + + old_errors = set(old_stage.get("errors", [])) + new_errors = set(new_stage.get("errors", [])) + + if old_errors != new_errors: + changed = True + changes.append({ + "stage": stage, + "old_error_count": len(old_errors), + "new_error_count": len(new_errors), + "added_errors": sorted(new_errors - old_errors), + "removed_errors": sorted(old_errors - new_errors), + }) + + if changed: + differences["changed_validation"].append({ + "case": case_name, + "changes": changes + }) + else: + differences["unchanged"].append(case_name) + + return differences + + +def print_comparison_report(differences: Dict[str, Any]): + """Print a human-readable comparison report.""" + print("=" * 60) + print("Validation Comparison Report") + print("=" * 60) + + print(f"\nNew cases: {len(differences['new_cases'])}") + for case in differences['new_cases'][:5]: + print(f" + {case}") + if len(differences['new_cases']) > 5: + print(f" ... and {len(differences['new_cases']) - 5} more") + + print(f"\nRemoved cases: {len(differences['removed_cases'])}") + for case in differences['removed_cases'][:5]: + print(f" - {case}") + + print(f"\nChanged validation: {len(differences['changed_validation'])}") + for item in differences['changed_validation'][:10]: + print(f"\n {item['case']}:") + for change in item['changes']: + print(f" [{change['stage']}] {change['old_error_count']} -> {change['new_error_count']} errors") + for err in change['added_errors'][:2]: + print(f" + {err[:60]}...") + for err in change['removed_errors'][:2]: + print(f" - {err[:60]}...") + + print(f"\nUnchanged: {len(differences['unchanged'])}") + + +if __name__ == "__main__": + print("Capturing validation snapshots for all examples...") + all_snapshots = capture_all_examples() + path = save_snapshots(all_snapshots) + print(f"\nSnapshots saved to: {path}") diff --git a/toolchain/mfc/params_tests/test_definitions.py b/toolchain/mfc/params_tests/test_definitions.py new file mode 100644 index 0000000000..7b9710bf43 --- /dev/null +++ b/toolchain/mfc/params_tests/test_definitions.py @@ -0,0 +1,187 @@ +""" +Unit tests for params/definitions.py module. + +Tests parameter definitions, constraints, and dependencies. 
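+
+Illustrative invocation (an assumption, not part of the suite itself:
+it presumes the toolchain directory is on PYTHONPATH so that the mfc
+package is importable):
+
+    python3 -m unittest mfc.params_tests.test_definitions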
+""" + +import unittest +from ..params import REGISTRY +from ..params.schema import ParamType +from ..params.definitions import ( + CONSTRAINTS, + DEPENDENCIES, + CASE_OPT_PARAMS, + _validate_constraint, + _validate_dependency, +) + + +class TestParameterDefinitions(unittest.TestCase): + """Tests for parameter definitions.""" + + def test_all_params_have_names(self): + """Every parameter should have a non-empty name.""" + for name, param in REGISTRY.all_params.items(): + self.assertEqual(name, param.name) + self.assertTrue(len(name) > 0) + + def test_all_params_have_valid_type(self): + """Every parameter should have a valid ParamType.""" + for name, param in REGISTRY.all_params.items(): + self.assertIsInstance( + param.param_type, ParamType, + f"Parameter '{name}' has invalid type" + ) + + def test_core_params_exist(self): + """Core parameters m, n, p, model_eqns should exist.""" + core_params = ["m", "n", "p", "model_eqns", "num_fluids"] + for param_name in core_params: + self.assertIn( + param_name, REGISTRY.all_params, + f"Core parameter '{param_name}' not found" + ) + + def test_domain_params_exist(self): + """Domain parameters should exist for all directions.""" + for d in ["x", "y", "z"]: + self.assertIn(f"{d}_domain%beg", REGISTRY.all_params) + self.assertIn(f"{d}_domain%end", REGISTRY.all_params) + self.assertIn(f"bc_{d}%beg", REGISTRY.all_params) + self.assertIn(f"bc_{d}%end", REGISTRY.all_params) + + +class TestConstraintValidation(unittest.TestCase): + """Tests for constraint schema validation.""" + + def test_valid_choices_constraint(self): + """Valid choices constraint should pass.""" + _validate_constraint("test", {"choices": [1, 2, 3]}) + + def test_valid_range_constraint(self): + """Valid range constraint should pass.""" + _validate_constraint("test", {"min": 0, "max": 100}) + + def test_invalid_key_raises(self): + """Invalid constraint key should raise ValueError.""" + with self.assertRaises(ValueError) as ctx: + _validate_constraint("test", {"chioces": [1, 2]}) # Typo for choices + + self.assertIn("chioces", str(ctx.exception)) + # Verify "did you mean?" 
suggestion is provided + self.assertIn("choices", str(ctx.exception)) + + def test_choices_must_be_list(self): + """choices value must be a list.""" + with self.assertRaises(ValueError): + _validate_constraint("test", {"choices": "not a list"}) + + def test_min_must_be_number(self): + """min value must be a number.""" + with self.assertRaises(ValueError): + _validate_constraint("test", {"min": "not a number"}) + + def test_all_defined_constraints_are_valid(self): + """All constraints in CONSTRAINTS dict should be valid.""" + for param_name, constraint in CONSTRAINTS.items(): + try: + _validate_constraint(param_name, constraint) + except ValueError as e: + self.fail(f"Invalid constraint for '{param_name}': {e}") + + +class TestDependencyValidation(unittest.TestCase): + """Tests for dependency schema validation.""" + + def test_valid_when_true_dependency(self): + """Valid when_true dependency should pass.""" + _validate_dependency("test", { + "when_true": { + "requires": ["other_param"], + "recommends": ["another_param"], + } + }) + + def test_invalid_top_level_key_raises(self): + """Invalid top-level dependency key should raise.""" + with self.assertRaises(ValueError) as ctx: + _validate_dependency("test", {"when_tru": {"requires": []}}) # Typo + + self.assertIn("when_tru", str(ctx.exception)) + + def test_invalid_condition_key_raises(self): + """Invalid condition key should raise.""" + with self.assertRaises(ValueError) as ctx: + _validate_dependency("test", { + "when_true": {"reqires": ["foo"]} # Typo for requires + }) + + self.assertIn("reqires", str(ctx.exception)) + # Verify "did you mean?" suggestion is provided + self.assertIn("requires", str(ctx.exception)) + + def test_requires_must_be_list(self): + """requires value must be a list.""" + with self.assertRaises(ValueError): + _validate_dependency("test", { + "when_true": {"requires": "not a list"} + }) + + def test_all_defined_dependencies_are_valid(self): + """All dependencies in DEPENDENCIES dict should be valid.""" + for param_name, dependency in DEPENDENCIES.items(): + try: + _validate_dependency(param_name, dependency) + except ValueError as e: + self.fail(f"Invalid dependency for '{param_name}': {e}") + + +class TestCaseOptimizationParams(unittest.TestCase): + """Tests for case optimization parameter set.""" + + def test_case_opt_params_exist_in_registry(self): + """All CASE_OPT_PARAMS should exist in registry.""" + for param_name in CASE_OPT_PARAMS: + self.assertIn( + param_name, REGISTRY.all_params, + f"Case opt param '{param_name}' not in registry" + ) + + def test_case_opt_params_have_flag_set(self): + """Params in CASE_OPT_PARAMS should have case_optimization=True.""" + for param_name in CASE_OPT_PARAMS: + param = REGISTRY.all_params[param_name] + self.assertTrue( + param.case_optimization, + f"Parameter '{param_name}' should have case_optimization=True" + ) + + +class TestParameterCounts(unittest.TestCase): + """Tests for expected parameter counts.""" + + def test_total_param_count(self): + """Total parameter count should be around 3400.""" + count = len(REGISTRY.all_params) + self.assertGreater(count, 3000, "Too few parameters") + self.assertLess(count, 4000, "Too many parameters") + + def test_log_params_count(self): + """Should have many LOG type parameters.""" + log_count = sum( + 1 for p in REGISTRY.all_params.values() + if p.param_type == ParamType.LOG + ) + self.assertGreater(log_count, 300, "Too few LOG parameters") + + def test_tagged_params_exist(self): + """Should have params with feature tags.""" + mhd_params 
= REGISTRY.get_params_by_tag("mhd") + self.assertGreater(len(mhd_params), 5, "Too few MHD parameters") + + bubbles_params = REGISTRY.get_params_by_tag("bubbles") + self.assertGreater(len(bubbles_params), 10, "Too few bubbles parameters") + + +if __name__ == "__main__": + unittest.main() diff --git a/toolchain/mfc/params_tests/test_integration.py b/toolchain/mfc/params_tests/test_integration.py new file mode 100644 index 0000000000..8a921310ed --- /dev/null +++ b/toolchain/mfc/params_tests/test_integration.py @@ -0,0 +1,234 @@ +""" +Integration tests for params module with case_dicts. + +Tests that the parameter registry integrates correctly with case_dicts.py +and provides correct JSON schema generation. +""" +# pylint: disable=import-outside-toplevel + +import unittest +from ..params import REGISTRY +from ..params.schema import ParamType + + +class TestParamTypeJsonSchema(unittest.TestCase): + """Tests for ParamType.json_schema property.""" + + def test_all_types_have_json_schema(self): + """Every ParamType should have a json_schema property.""" + for param_type in ParamType: + schema = param_type.json_schema + self.assertIsInstance(schema, dict) + # Schema must have either "type" or "enum" key + self.assertTrue( + "type" in schema or "enum" in schema, + f"{param_type.name} schema has neither 'type' nor 'enum'" + ) + + def test_int_schema(self): + """INT should map to integer JSON schema.""" + schema = ParamType.INT.json_schema + self.assertEqual(schema, {"type": "integer"}) + + def test_real_schema(self): + """REAL should map to number JSON schema.""" + schema = ParamType.REAL.json_schema + self.assertEqual(schema, {"type": "number"}) + + def test_log_schema(self): + """LOG should map to enum with T/F.""" + schema = ParamType.LOG.json_schema + self.assertEqual(schema, {"enum": ["T", "F"]}) + + def test_str_schema(self): + """STR should map to string JSON schema.""" + schema = ParamType.STR.json_schema + self.assertEqual(schema, {"type": "string"}) + + def test_analytic_int_schema(self): + """ANALYTIC_INT should accept integer or string.""" + schema = ParamType.ANALYTIC_INT.json_schema + self.assertEqual(schema, {"type": ["integer", "string"]}) + + def test_analytic_real_schema(self): + """ANALYTIC_REAL should accept number or string.""" + schema = ParamType.ANALYTIC_REAL.json_schema + self.assertEqual(schema, {"type": ["number", "string"]}) + + +class TestRegistryJsonSchema(unittest.TestCase): + """Tests for registry JSON schema generation.""" + + def test_get_json_schema_returns_valid_schema(self): + """get_json_schema should return valid JSON schema structure.""" + schema = REGISTRY.get_json_schema() + + self.assertIn("type", schema) + self.assertEqual(schema["type"], "object") + self.assertIn("properties", schema) + self.assertIn("additionalProperties", schema) + self.assertFalse(schema["additionalProperties"]) + + def test_get_json_schema_has_all_params(self): + """Schema properties should include all registry params.""" + schema = REGISTRY.get_json_schema() + properties = schema["properties"] + + self.assertEqual(len(properties), len(REGISTRY.all_params)) + + def test_core_params_in_schema(self): + """Core params should be in JSON schema.""" + schema = REGISTRY.get_json_schema() + props = schema["properties"] + + self.assertIn("m", props) + self.assertIn("n", props) + self.assertIn("p", props) + self.assertIn("model_eqns", props) + + +class TestRegistryTagQueries(unittest.TestCase): + """Tests for tag-based parameter queries.""" + + def test_get_params_by_tag(self): + """Should get 
params by feature tag.""" + mhd_params = REGISTRY.get_params_by_tag("mhd") + + self.assertIsInstance(mhd_params, dict) + self.assertGreater(len(mhd_params), 5) + self.assertIn("mhd", mhd_params) + + def test_get_all_tags(self): + """Should get all registered tags.""" + tags = REGISTRY.get_all_tags() + + self.assertIsInstance(tags, set) + self.assertIn("mhd", tags) + self.assertIn("bubbles", tags) + self.assertIn("weno", tags) + + def test_params_have_correct_tags(self): + """Parameters should have their expected tags.""" + mhd_param = REGISTRY.all_params.get("mhd") + self.assertIsNotNone(mhd_param) + self.assertIn("mhd", mhd_param.tags) + + bubbles_param = REGISTRY.all_params.get("bubbles_euler") + self.assertIsNotNone(bubbles_param) + self.assertIn("bubbles", bubbles_param.tags) + + +class TestCaseDictsIntegration(unittest.TestCase): + """Tests for integration with actual case_dicts module.""" + + def test_case_dicts_loads_from_registry(self): + """case_dicts module should load successfully from registry.""" + from ..run import case_dicts + + # ALL should be populated + self.assertIsNotNone(case_dicts.ALL) + + def test_case_dicts_all_contains_all_params(self): + """case_dicts.ALL should contain all registry params.""" + from ..run import case_dicts + + # ALL should have approximately the same params as registry + self.assertEqual(len(case_dicts.ALL), len(REGISTRY.all_params)) + + def test_case_optimization_params_from_registry(self): + """CASE_OPTIMIZATION should be populated from registry.""" + from ..run import case_dicts + + self.assertIsInstance(case_dicts.CASE_OPTIMIZATION, list) + self.assertGreater(len(case_dicts.CASE_OPTIMIZATION), 10) + + def test_json_schema_valid(self): + """Generated JSON schema should be valid.""" + from ..run import case_dicts + + schema = case_dicts.SCHEMA + self.assertIn("type", schema) + self.assertEqual(schema["type"], "object") + self.assertIn("properties", schema) + self.assertEqual(len(schema["properties"]), len(REGISTRY.all_params)) + + def test_json_schema_matches_registry(self): + """case_dicts.SCHEMA should match REGISTRY.get_json_schema().""" + from ..run import case_dicts + + registry_schema = REGISTRY.get_json_schema() + case_dicts_schema = case_dicts.SCHEMA + + self.assertEqual(registry_schema, case_dicts_schema) + + def test_get_validator_works(self): + """get_validator should return a callable validator.""" + from ..run import case_dicts + + validator = case_dicts.get_validator() + self.assertTrue(callable(validator)) + + def test_get_input_dict_keys(self): + """get_input_dict_keys should return target-specific params.""" + from ..run import case_dicts + + # Each target gets a filtered subset of params based on Fortran namelists + pre_keys = case_dicts.get_input_dict_keys("pre_process") + sim_keys = case_dicts.get_input_dict_keys("simulation") + post_keys = case_dicts.get_input_dict_keys("post_process") + + # pre_process has most params (includes patch_icpp, patch_bc) + self.assertGreater(len(pre_keys), 2500) + # simulation and post_process have fewer (no patch_icpp, etc.) 
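+        # (The thresholds below are deliberately loose lower bounds: they
+        # survive routine parameter additions without edits while still
+        # catching a wholesale loss of target-specific keys.)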
+ self.assertGreater(len(sim_keys), 500) + self.assertGreater(len(post_keys), 400) + + # Verify target-specific filtering based on Fortran namelists + self.assertIn("num_patches", pre_keys) + self.assertNotIn("num_patches", sim_keys) + self.assertNotIn("num_patches", post_keys) + + self.assertNotIn("run_time_info", pre_keys) + self.assertIn("run_time_info", sim_keys) + self.assertNotIn("run_time_info", post_keys) + + # Verify indexed params are filtered correctly + patch_icpp_pre = [k for k in pre_keys if k.startswith("patch_icpp")] + patch_icpp_sim = [k for k in sim_keys if k.startswith("patch_icpp")] + self.assertGreater(len(patch_icpp_pre), 1000) # Many patch_icpp params + self.assertEqual(len(patch_icpp_sim), 0) # None in simulation + + # Verify shared params are in all targets + self.assertIn("m", pre_keys) + self.assertIn("m", sim_keys) + self.assertIn("m", post_keys) + + +class TestValidatorIntegration(unittest.TestCase): + """Tests for integration with case_validator.""" + + def test_validator_gets_log_params_from_registry(self): + """Validator should discover LOG params from registry.""" + from ..case_validator import _get_logical_params_from_registry + + log_params = _get_logical_params_from_registry() + + self.assertIsInstance(log_params, set) + self.assertGreater(len(log_params), 300) + + def test_validator_log_params_match_registry(self): + """Validator LOG params should match registry LOG params.""" + from ..case_validator import _get_logical_params_from_registry + + validator_log_params = _get_logical_params_from_registry() + + registry_log_params = { + name for name, p in REGISTRY.all_params.items() + if p.param_type == ParamType.LOG + } + + self.assertEqual(validator_log_params, registry_log_params) + + +if __name__ == "__main__": + unittest.main() diff --git a/toolchain/mfc/params_tests/test_registry.py b/toolchain/mfc/params_tests/test_registry.py new file mode 100644 index 0000000000..b2ba6c8b12 --- /dev/null +++ b/toolchain/mfc/params_tests/test_registry.py @@ -0,0 +1,132 @@ +""" +Unit tests for params/registry.py module. + +Tests registry functionality, freezing, and tag queries. 
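+
+The freeze contract exercised below, in brief (an illustrative sketch
+using this module's own imports):
+
+    reg = ParamRegistry()
+    reg.register(ParamDef(name="m", param_type=ParamType.INT))
+    reg.freeze()
+    reg.register(ParamDef(name="x", param_type=ParamType.INT))  # raises RegistryFrozenError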
+""" +# pylint: disable=import-outside-toplevel + +import unittest +from ..params.registry import ParamRegistry, RegistryFrozenError +from ..params.schema import ParamDef, ParamType + + +class TestParamRegistry(unittest.TestCase): + """Tests for ParamRegistry class.""" + + def test_register_new_param(self): + """Registering a new param should add it to the registry.""" + reg = ParamRegistry() + param = ParamDef(name="test", param_type=ParamType.INT) + reg.register(param) + + self.assertIn("test", reg.all_params) + self.assertEqual(reg.all_params["test"].param_type, ParamType.INT) + + def test_register_merge_tags(self): + """Registering same param twice should merge tags.""" + reg = ParamRegistry() + reg.register(ParamDef(name="test", param_type=ParamType.INT, tags={"mhd"})) + reg.register(ParamDef(name="test", param_type=ParamType.INT, tags={"physics"})) + + self.assertEqual( + reg.all_params["test"].tags, + {"mhd", "physics"} + ) + + def test_register_type_mismatch_raises(self): + """Registering same param with different type should raise.""" + reg = ParamRegistry() + reg.register(ParamDef(name="test", param_type=ParamType.INT)) + + with self.assertRaises(ValueError) as ctx: + reg.register(ParamDef(name="test", param_type=ParamType.REAL)) + + self.assertIn("Type mismatch", str(ctx.exception)) + + def test_get_params_by_tag(self): + """get_params_by_tag should return params with that tag.""" + reg = ParamRegistry() + reg.register(ParamDef(name="mhd_param", param_type=ParamType.INT, tags={"mhd"})) + reg.register(ParamDef(name="other_param", param_type=ParamType.INT, tags={"other"})) + + mhd_params = reg.get_params_by_tag("mhd") + + self.assertIn("mhd_param", mhd_params) + self.assertNotIn("other_param", mhd_params) + + def test_get_all_tags(self): + """get_all_tags should return all registered tags.""" + reg = ParamRegistry() + reg.register(ParamDef(name="p1", param_type=ParamType.INT, tags={"mhd", "physics"})) + reg.register(ParamDef(name="p2", param_type=ParamType.INT, tags={"bubbles"})) + + tags = reg.get_all_tags() + + self.assertEqual(tags, {"mhd", "physics", "bubbles"}) + + +class TestRegistryFreezing(unittest.TestCase): + """Tests for registry freeze functionality.""" + + def test_freeze_sets_flag(self): + """freeze() should set is_frozen to True.""" + reg = ParamRegistry() + self.assertFalse(reg.is_frozen) + + reg.freeze() + + self.assertTrue(reg.is_frozen) + + def test_freeze_is_idempotent(self): + """Calling freeze() multiple times should be safe.""" + reg = ParamRegistry() + reg.freeze() + reg.freeze() # Should not raise + + self.assertTrue(reg.is_frozen) + + def test_register_after_freeze_raises(self): + """Registering after freeze should raise RegistryFrozenError.""" + reg = ParamRegistry() + reg.freeze() + + with self.assertRaises(RegistryFrozenError): + reg.register(ParamDef(name="test", param_type=ParamType.INT)) + + def test_all_params_readonly_after_freeze(self): + """all_params should be read-only after freeze.""" + reg = ParamRegistry() + reg.register(ParamDef(name="test", param_type=ParamType.INT)) + reg.freeze() + + params = reg.all_params + + with self.assertRaises(TypeError): + params["hacked"] = "value" + + +class TestGlobalRegistry(unittest.TestCase): + """Tests for the global REGISTRY instance.""" + + def test_global_registry_is_frozen(self): + """Global REGISTRY should be frozen after import.""" + from ..params import REGISTRY + self.assertTrue(REGISTRY.is_frozen) + + def test_global_registry_has_params(self): + """Global REGISTRY should have parameters 
loaded.""" + from ..params import REGISTRY + self.assertGreater(len(REGISTRY.all_params), 3000) + + def test_global_registry_cannot_be_modified(self): + """Global REGISTRY should reject new registrations.""" + from ..params import REGISTRY + + with self.assertRaises(RegistryFrozenError): + REGISTRY.register( + ParamDef(name="injected", param_type=ParamType.INT) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/toolchain/mfc/params_tests/test_validate.py b/toolchain/mfc/params_tests/test_validate.py new file mode 100644 index 0000000000..0f2cb69d19 --- /dev/null +++ b/toolchain/mfc/params_tests/test_validate.py @@ -0,0 +1,235 @@ +""" +Unit tests for params/validate.py module. + +Tests constraint validation, dependency checking, and error formatting. +""" + +import unittest +from ..params.validate import ( + validate_constraints, + check_dependencies, + check_unknown_params, + validate_case, + format_validation_results, +) +from ..params.suggest import RAPIDFUZZ_AVAILABLE + + +class TestValidateConstraints(unittest.TestCase): + """Tests for validate_constraints function.""" + + def test_valid_params_no_errors(self): + """Valid parameters should return empty error list.""" + params = { + "m": 100, + "n": 50, + "dt": 1e-5, + } + errors = validate_constraints(params) + self.assertEqual(errors, []) + + def test_unknown_params_ignored(self): + """Unknown parameters should be silently ignored.""" + params = { + "unknown_param_xyz": 123, + "another_unknown": "value", + } + errors = validate_constraints(params) + self.assertEqual(errors, []) + + def test_analytic_string_skipped(self): + """Analytic expressions (strings) should skip numeric validation.""" + params = { + "dt": "1e-5 * m / 100", # String expression for numeric param + } + errors = validate_constraints(params) + self.assertEqual(errors, []) + + def test_constraint_validation_called(self): + """Constraint validation should be invoked for registered params.""" + # This tests the integration with ParamDef.validate_value + params = { + "m": 100, # Valid integer + } + errors = validate_constraints(params) + self.assertEqual(errors, []) + + +class TestCheckUnknownParams(unittest.TestCase): + """Tests for check_unknown_params with 'did you mean?' 
suggestions.""" + + def test_known_params_no_errors(self): + """Known parameters should not generate errors.""" + params = { + "m": 100, + "n": 50, + "dt": 1e-5, + } + errors = check_unknown_params(params) + self.assertEqual(errors, []) + + def test_unknown_param_returns_error(self): + """Unknown parameter should return an error.""" + params = { + "totally_unknown_xyz_123": 42, + } + errors = check_unknown_params(params) + self.assertEqual(len(errors), 1) + self.assertIn("Unknown parameter", errors[0]) + self.assertIn("totally_unknown_xyz_123", errors[0]) + + @unittest.skipUnless(RAPIDFUZZ_AVAILABLE, "rapidfuzz not installed") + def test_similar_param_suggests_correction(self): + """Typo near valid param should suggest 'did you mean?'.""" + # "model_eqn" is close to "model_eqns" + params = { + "model_eqn": 2, # Missing 's' + } + errors = check_unknown_params(params) + self.assertEqual(len(errors), 1) + self.assertIn("model_eqn", errors[0]) + # Should suggest the correct parameter + self.assertIn("model_eqns", errors[0]) + self.assertIn("Did you mean", errors[0]) + + @unittest.skipUnless(RAPIDFUZZ_AVAILABLE, "rapidfuzz not installed") + def test_weno_typo_suggests_correction(self): + """Typo in weno_order should suggest correction.""" + params = { + "weno_ordr": 5, # Typo for weno_order + } + errors = check_unknown_params(params) + self.assertEqual(len(errors), 1) + self.assertIn("weno_order", errors[0]) + + +class TestCheckDependencies(unittest.TestCase): + """Tests for check_dependencies function.""" + + def test_no_dependencies_empty_result(self): + """Params without dependencies return empty errors/warnings.""" + params = { + "m": 100, + "n": 50, + } + errors, warnings = check_dependencies(params) + self.assertEqual(errors, []) + self.assertEqual(warnings, []) + + def test_unknown_params_ignored(self): + """Unknown parameters should be silently ignored.""" + params = { + "unknown_param_xyz": "T", + } + errors, warnings = check_dependencies(params) + self.assertEqual(errors, []) + self.assertEqual(warnings, []) + + +class TestValidateCase(unittest.TestCase): + """Tests for validate_case function.""" + + def test_valid_case_no_errors(self): + """Valid case should return empty errors.""" + params = { + "m": 100, + "n": 50, + "p": 0, + "dt": 1e-5, + } + errors, _ = validate_case(params) + self.assertEqual(errors, []) + + def test_warn_false_skips_warnings(self): + """warn=False should skip dependency warnings.""" + params = {"m": 100} + _, warnings = validate_case(params, warn=False) + self.assertEqual(warnings, []) + + def test_returns_tuple(self): + """Should return tuple of (errors, warnings).""" + params = {"m": 100} + result = validate_case(params) + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + self.assertIsInstance(result[0], list) + self.assertIsInstance(result[1], list) + + def test_check_unknown_true_catches_unknown(self): + """check_unknown=True should catch unknown parameters.""" + params = { + "m": 100, + "unknwn_prm": 42, # Unknown param + } + errors, _ = validate_case(params, check_unknown=True) + self.assertGreater(len(errors), 0) + self.assertIn("unknwn_prm", str(errors)) + + def test_check_unknown_false_ignores_unknown(self): + """check_unknown=False should ignore unknown parameters.""" + params = { + "m": 100, + "unknwn_prm": 42, # Unknown param + } + errors, _ = validate_case(params, check_unknown=False) + # No errors from unknown params (may have other errors) + unknown_errors = [e for e in errors if "unknwn_prm" in e] + 
self.assertEqual(unknown_errors, []) + + +class TestFormatValidationResults(unittest.TestCase): + """Tests for format_validation_results function.""" + + def test_empty_results(self): + """Empty errors and warnings should return empty string.""" + result = format_validation_results([], []) + self.assertEqual(result, "") + + def test_errors_only(self): + """Format should include errors with red markers.""" + errors = ["Error 1", "Error 2"] + result = format_validation_results(errors, []) + self.assertIn("Validation Errors", result) + self.assertIn("Error 1", result) + self.assertIn("Error 2", result) + + def test_warnings_only(self): + """Format should include warnings with yellow markers.""" + warnings = ["Warning 1"] + result = format_validation_results([], warnings) + self.assertIn("Warnings", result) + self.assertIn("Warning 1", result) + + def test_both_errors_and_warnings(self): + """Format should include both errors and warnings.""" + errors = ["Error 1"] + warnings = ["Warning 1"] + result = format_validation_results(errors, warnings) + self.assertIn("Validation Errors", result) + self.assertIn("Error 1", result) + self.assertIn("Warnings", result) + self.assertIn("Warning 1", result) + + def test_rich_markup_included(self): + """Format should include Rich markup for colors.""" + errors = ["Error 1"] + result = format_validation_results(errors, []) + self.assertIn("[red]", result) + + +class TestSchemaValidation(unittest.TestCase): + """Tests for ParamDef.validate_value through validate_constraints.""" + + def test_numeric_type_check_for_min_max(self): + """String values should not crash on numeric comparisons.""" + # This tests the fix for the type comparison bug + params = { + "m": "100 + 50", # String that looks numeric but isn't + } + # Should not raise TypeError + errors = validate_constraints(params) + self.assertIsInstance(errors, list) + + +if __name__ == "__main__": + unittest.main() diff --git a/toolchain/mfc/run/case_dicts.py b/toolchain/mfc/run/case_dicts.py index 4b1ca33ca3..27a2ed86f1 100644 --- a/toolchain/mfc/run/case_dicts.py +++ b/toolchain/mfc/run/case_dicts.py @@ -1,582 +1,116 @@ -import fastjsonschema - -from enum import Enum +""" +MFC Case Parameter Type Definitions. + +This module provides exports from the central parameter registry (mfc.params). +All parameter definitions are sourced from the registry. 
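+
+Minimal usage sketch (illustrative; assumes the toolchain directory is
+on PYTHONPATH so the mfc package is importable, and that case_params is
+an already-loaded case dictionary):
+
+    from mfc.run import case_dicts
+
+    validator = case_dicts.get_validator()
+    validator(case_params)  # compiled fastjsonschema validator; raises on violations
+    sim_keys = case_dicts.get_input_dict_keys("simulation")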
+ +Exports: + ALL: Dict of all parameters {name: ParamType} + IGNORE: Parameters to skip during certain operations + CASE_OPTIMIZATION: Parameters that can be hard-coded for GPU builds + SCHEMA: JSON schema for fastjsonschema validation + get_validator(): Returns compiled JSON schema validator + get_input_dict_keys(): Get parameter keys for a target +""" +# pylint: disable=import-outside-toplevel + +import re from ..state import ARG -from functools import cache - - -class ParamType(Enum): - INT = {"type": "integer"} - REAL = {"type": "number"} - LOG = {"enum": ["T", "F"]} - STR = {"type": "string"} - - _ANALYTIC_INT = {"type": ["integer", "string"]} - _ANALYTIC_REAL = {"type": ["number", "string"]} - - def analytic(self): - if self == self.INT: - return self._ANALYTIC_INT - if self == self.REAL: - return self._ANALYTIC_REAL - return self.STR - -COMMON = { - 'mhd': ParamType.LOG, - 'hypoelasticity': ParamType.LOG, - 'hyperelasticity': ParamType.LOG, - 'cyl_coord': ParamType.LOG, - 'pref': ParamType.REAL, - 'p': ParamType.INT, - 'parallel_io': ParamType.LOG, - 'poly_sigma': ParamType.REAL, - 'case_dir': ParamType.STR, - 'thermal': ParamType.INT, - 'polytropic': ParamType.LOG, - 'm': ParamType.INT, - 'mpp_lim': ParamType.LOG, - 'R0ref': ParamType.REAL, - 'num_fluids': ParamType.INT, - 'model_eqns': ParamType.INT, - 'nb': ParamType.REAL, - 'weno_order': ParamType.INT, - 'rhoref': ParamType.REAL, - 'bubbles_euler': ParamType.LOG, - 'n': ParamType.INT, - 'precision': ParamType.INT, - 'polydisperse': ParamType.LOG, - 'file_per_process': ParamType.LOG, - 'relax': ParamType.LOG, - 'relax_model': ParamType.INT, - 'sigma': ParamType.REAL, - 'adv_n': ParamType.LOG, - 'cfl_adap_dt': ParamType.LOG, - 'cfl_const_dt': ParamType.LOG, - 'chemistry': ParamType.LOG, - 'cantera_file': ParamType.STR, - 'Bx0': ParamType.REAL, - 'relativity': ParamType.LOG, - 'cont_damage': ParamType.LOG, - 'hyper_cleaning': ParamType.LOG, - 'num_bc_patches': ParamType.INT, - 'igr': ParamType.LOG, - 'igr_order': ParamType.INT, - 'down_sample': ParamType.LOG, - 'recon_type': ParamType.INT, - 'muscl_order': ParamType.INT, -} - -PRE_PROCESS = COMMON.copy() -PRE_PROCESS.update({ - 'old_grid': ParamType.LOG, - 'old_ic': ParamType.LOG, - 't_step_old': ParamType.INT, - 't_step_start': ParamType.INT, - 'mixlayer_vel_profile': ParamType.LOG, - 'mixlayer_vel_coef': ParamType.REAL, - 'mixlayer_domain': ParamType.REAL, - 'mixlayer_perturb': ParamType.LOG, - 'mixlayer_perturb_nk': ParamType.INT, - 'mixlayer_perturb_k0': ParamType.REAL, - 'perturb_flow': ParamType.LOG, - 'perturb_flow_fluid': ParamType.INT, - 'perturb_flow_mag': ParamType.REAL, - 'perturb_sph': ParamType.LOG, - 'perturb_sph_fluid': ParamType.INT, - 'fluid_rho': ParamType.REAL, - 'num_patches': ParamType.INT, - 'qbmm': ParamType.LOG, - 'dist_type': ParamType.INT, - 'sigR': ParamType.REAL, - 'sigV': ParamType.REAL, - 'rhoRV': ParamType.REAL, - 'palpha_eps': ParamType.REAL, - 'ptgalpha_eps': ParamType.REAL, - 'pi_fac': ParamType.REAL, - 'ib': ParamType.LOG, - 'num_ibs': ParamType.INT, - 'pre_stress': ParamType.LOG, - 'cfl_dt': ParamType.LOG, - 'n_start': ParamType.INT, - 'n_start_old': ParamType.INT, - 'surface_tension': ParamType.LOG, - 'elliptic_smoothing': ParamType.LOG, - 'elliptic_smoothing_iters': ParamType.INT, - 'viscous': ParamType.LOG, - 'bubbles_lagrange': ParamType.LOG, - 'simplex_perturb': ParamType.LOG, - 'fft_wrt': ParamType.LOG, -}) - -for var in ["R0ref", "p0ref", "rho0ref", "T0ref", "ss", "pv", "vd", - "mu_l", "mu_v", "mu_g", "gam_v", "gam_g", - "M_v", "M_g", 
"k_v", "k_g", "cp_v", "cp_g", "R_v", "R_g" ]: - PRE_PROCESS[f"bub_pp%{var}"] = ParamType.REAL - -for ib_id in range(1, 10+1): - for real_attr, ty in [("geometry", ParamType.INT), ("radius", ParamType.REAL), - ("theta", ParamType.REAL), ("slip", ParamType.LOG), - ("c", ParamType.REAL), ("p", ParamType.REAL), - ("t", ParamType.REAL), ("m", ParamType.REAL), - ("moving_ibm", ParamType.INT), ("mass", ParamType.REAL)]: - PRE_PROCESS[f"patch_ib({ib_id})%{real_attr}"] = ty - - for dir_id in range(1, 4): - PRE_PROCESS[f"patch_ib({ib_id})%angles({dir_id})"] = ParamType.REAL - - for cmp_id, cmp in enumerate(["x", "y", "z"]): - cmp_id += 1 - PRE_PROCESS[f'patch_ib({ib_id})%{cmp}_centroid'] = ParamType.REAL - PRE_PROCESS[f'patch_ib({ib_id})%length_{cmp}'] = ParamType.REAL - - for real_attr_stl, ty_stl in [("filepath", ParamType.STR), ("spc", ParamType.INT), - ("threshold", ParamType.REAL)]: - PRE_PROCESS[f"patch_ib({ib_id})%model_{real_attr_stl}"] = ty_stl - - for real_attr_stl2 in ["translate", "scale", "rotate"]: - for j in range(1, 4): - PRE_PROCESS[f"patch_ib({ib_id})%model_{real_attr_stl2}({j})"] = ParamType.REAL - -for cmp in ["x", "y", "z"]: - for prepend in ["domain%beg", "domain%end", "a", "b"]: - PRE_PROCESS[f"{cmp}_{prepend}"] = ParamType.REAL - - for append, ty in [("stretch", ParamType.LOG), ("a", ParamType.REAL), - ("loops", ParamType.INT)]: - PRE_PROCESS[f"{append}_{cmp}"] = ty - - PRE_PROCESS[f"bc_{cmp}%beg"] = ParamType.INT - PRE_PROCESS[f"bc_{cmp}%end"] = ParamType.INT - -for f_id in range(1, 10+1): - PRE_PROCESS[f'fluid_rho({f_id})'] = ParamType.REAL - - for real_attr in ["gamma", "pi_inf", "G", "cv", "qv", "qvp" ]: - PRE_PROCESS[f"fluid_pp({f_id})%{real_attr}"] = ParamType.REAL - - PRE_PROCESS[f"simplex_params%perturb_dens({f_id})"] = ParamType.LOG - PRE_PROCESS[f"simplex_params%perturb_dens_freq({f_id})"] = ParamType.REAL - PRE_PROCESS[f"simplex_params%perturb_dens_scale({f_id})"] = ParamType.REAL - - for dir in range(1, 3+1): - PRE_PROCESS[f"simplex_params%perturb_dens_offset({f_id}, {dir})"] = ParamType.REAL - -for bc_p_id in range(1, 10+1): - for attribute in ["geometry","type","dir","loc"]: - PRE_PROCESS[f"patch_bc({bc_p_id})%{attribute}"] = ParamType.INT - - for attribute in ["centroid","length"]: - for d_id in range(1, 3+1): - PRE_PROCESS[f"patch_bc({bc_p_id})%{attribute}({d_id})"] = ParamType.REAL - - PRE_PROCESS[f"patch_bc({bc_p_id})%radius"] = ParamType.REAL - -for p_id in range(1, 10+1): - for attribute, ty in [("geometry", ParamType.INT), ("smoothen", ParamType.LOG), - ("smooth_patch_id", ParamType.INT), ("hcid", ParamType.INT)]: - PRE_PROCESS[f"patch_icpp({p_id})%{attribute}"] = ty - - for real_attr in ["radius", "radii", "epsilon", "beta", "normal", "alpha_rho", - 'non_axis_sym', "normal", "smooth_coeff", "rho", "vel", - "alpha", "gamma", "pi_inf", "r0", "v0", "p0", "m0", "cv", - "qv", "qvp"]: - PRE_PROCESS[f"patch_icpp({p_id})%{real_attr}"] = ParamType.REAL - - for real_attr in range(2, 9+1): - PRE_PROCESS[f"patch_icpp({p_id})%a({real_attr})"] = ParamType.REAL - - PRE_PROCESS[f"patch_icpp({p_id})%pres"] = ParamType.REAL.analytic() - - PRE_PROCESS[f"patch_icpp({p_id})%Bx"] = ParamType.REAL.analytic() - PRE_PROCESS[f"patch_icpp({p_id})%By"] = ParamType.REAL.analytic() - PRE_PROCESS[f"patch_icpp({p_id})%Bz"] = ParamType.REAL.analytic() - - for i in range(100): - PRE_PROCESS[f"patch_icpp({p_id})%Y({i})"] = ParamType.REAL.analytic() - - PRE_PROCESS[f"patch_icpp({p_id})%model_filepath"] = ParamType.STR - - for real_attr in ["translate", "scale", "rotate"]: - for j in 
range(1, 4): - PRE_PROCESS[f"patch_icpp({p_id})%model_{real_attr}({j})"] = ParamType.REAL - - PRE_PROCESS[f"patch_icpp({p_id})%model_spc"] = ParamType.INT - PRE_PROCESS[f"patch_icpp({p_id})%model_threshold"] = ParamType.REAL - - for cmp_id, cmp in enumerate(["x", "y", "z"]): - cmp_id += 1 - PRE_PROCESS[f'patch_icpp({p_id})%{cmp}_centroid'] = ParamType.REAL - PRE_PROCESS[f'patch_icpp({p_id})%length_{cmp}'] = ParamType.REAL - - for append in ["radii", "normal"]: - PRE_PROCESS[f'patch_icpp({p_id})%{append}({cmp_id})'] = ParamType.REAL - PRE_PROCESS[f'patch_icpp({p_id})%vel({cmp_id})'] = ParamType.REAL.analytic() - - for arho_id in range(1, 10+1): - PRE_PROCESS[f'patch_icpp({p_id})%alpha({arho_id})'] = ParamType.REAL.analytic() - PRE_PROCESS[f'patch_icpp({p_id})%alpha_rho({arho_id})'] = ParamType.REAL.analytic() - - for taue_id in range(1, 6+1): - PRE_PROCESS[f'patch_icpp({p_id})%tau_e({taue_id})'] = ParamType.REAL.analytic() - - PRE_PROCESS[f'patch_icpp({p_id})%cf_val'] = ParamType.REAL.analytic() - - if p_id >= 2: - PRE_PROCESS[f'patch_icpp({p_id})%alter_patch'] = ParamType.LOG - - for alter_id in range(1, p_id): - PRE_PROCESS[f'patch_icpp({p_id})%alter_patch({alter_id})'] = ParamType.LOG - - PRE_PROCESS[f'patch_icpp({p_id})%cf_val'] = ParamType.REAL.analytic() - - for cmp in ["x", "y", "z"]: - PRE_PROCESS[f'bc_{cmp}%beg'] = ParamType.INT - PRE_PROCESS[f'bc_{cmp}%end'] = ParamType.INT - PRE_PROCESS[f'bc_{cmp}%vb1'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%vb2'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%vb3'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%ve1'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%ve2'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%ve3'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%pres_in'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%pres_out'] = ParamType.REAL - PRE_PROCESS[f'bc_{cmp}%grcbc_in'] = ParamType.LOG - PRE_PROCESS[f'bc_{cmp}%grcbc_out'] = ParamType.LOG - PRE_PROCESS[f'bc_{cmp}%grcbc_vel_out'] = ParamType.LOG - - for int_id in range(1, 10+1): - PRE_PROCESS[f"bc_{cmp}%alpha_rho_in({int_id})"] = ParamType.REAL - PRE_PROCESS[f"bc_{cmp}%alpha_in({int_id})"] = ParamType.REAL - - for int_id in range(1, 3+1): - PRE_PROCESS[f"bc_{cmp}%vel_in({int_id})"] = ParamType.REAL - PRE_PROCESS[f"bc_{cmp}%vel_out({int_id})"] = ParamType.REAL - -for d_id in range(1, 3+1): - PRE_PROCESS[f"simplex_params%perturb_vel({d_id})"] = ParamType.LOG - PRE_PROCESS[f"simplex_params%perturb_vel_freq({d_id})"] = ParamType.REAL - PRE_PROCESS[f"simplex_params%perturb_vel_scale({d_id})"] = ParamType.REAL - for dir in range(1, 3+1): - PRE_PROCESS[f"simplex_params%perturb_vel_offset({d_id},{dir})"] = ParamType.REAL - -# NOTE: Currently unused. -# for f_id in range(1, 10+1): -# PRE_PROCESS.append(f"spec_pp({f_id})") - - -# Removed: 't_tol', 'alt_crv', 'regularization', 'lsq_deriv', -# Feel free to put them back if they are needed once more. -# Be sure to add them to the correct type set at the top of the file too! 
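The replacement module's docstring (top of this file's diff) lists the new public surface. As a quick orientation, here is a minimal sketch of how a caller might consume those exports; the case dictionary and the import path are illustrative assumptions, and `get_validator()` is assumed to return a compiled fastjsonschema validator, as the registry code later in this diff suggests:

```python
import fastjsonschema

# Hypothetical consumer of the exports listed in the module docstring above.
from mfc.run.case_dicts import get_input_dict_keys, get_validator

case = {"m": 100, "n": 0, "p": 0, "dt": 1.0e-6, "parallel_io": "T"}

validate = get_validator()   # compiled once and cached by the registry
try:
    validate(case)           # raises on an unknown key or a mistyped value
except fastjsonschema.JsonSchemaException as exc:
    print(f"Invalid case parameter: {exc.message}")

# Keys the pre_process stage actually reads, per its Fortran namelist:
pre_keys = get_input_dict_keys("pre_process")
```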
-SIMULATION = COMMON.copy() -SIMULATION.update({ - 'run_time_info': ParamType.LOG, - 't_step_old': ParamType.INT, - 'dt': ParamType.REAL, - 't_step_start': ParamType.INT, - 't_step_stop': ParamType.INT, - 't_step_save': ParamType.INT, - 't_step_print': ParamType.INT, - 'time_stepper': ParamType.INT, - 'weno_eps': ParamType.REAL, - 'teno_CT': ParamType.REAL, - 'wenoz_q': ParamType.REAL, - 'mapped_weno': ParamType.LOG, - 'wenoz': ParamType.LOG, - 'teno': ParamType.LOG, - 'mp_weno': ParamType.LOG, - 'weno_avg': ParamType.LOG, - 'weno_Re_flux': ParamType.LOG, - 'riemann_solver': ParamType.INT, - 'wave_speeds': ParamType.INT, - 'avg_state': ParamType.INT, - 'prim_vars_wrt': ParamType.LOG, - 'alt_soundspeed': ParamType.LOG, - 'null_weights': ParamType.LOG, - 'mixture_err': ParamType.LOG, - 'fd_order': ParamType.INT, - 'num_probes': ParamType.INT, - 'probe_wrt': ParamType.LOG, - 'bubble_model': ParamType.INT, - 'acoustic_source': ParamType.LOG, - 'num_source': ParamType.INT, - 'qbmm': ParamType.LOG, - 'integral_wrt': ParamType.LOG, - 'num_integrals': ParamType.INT, - 'rdma_mpi': ParamType.LOG, - 'palpha_eps': ParamType.REAL, - 'ptgalpha_eps': ParamType.REAL, - 'pi_fac': ParamType.REAL, - 'adap_dt': ParamType.LOG, - 'adap_dt_tol': ParamType.REAL, - 'adap_dt_max_iters': ParamType.INT, - 'ib': ParamType.LOG, - 'num_ibs': ParamType.INT, - 'n_start': ParamType.INT, - 't_stop': ParamType.REAL, - 't_save': ParamType.REAL, - 'cfl_target': ParamType.REAL, - 'low_Mach': ParamType.INT, - 'surface_tension': ParamType.LOG, - 'viscous': ParamType.LOG, - 'bubbles_lagrange': ParamType.LOG, - 'num_bc_patches': ParamType.INT, - 'tau_star': ParamType.REAL, - 'cont_damage_s': ParamType.REAL, - 'alpha_bar': ParamType.REAL, - 'hyper_cleaning_speed': ParamType.REAL, - 'hyper_cleaning_tau': ParamType.REAL, - 'num_igr_iters': ParamType.INT, - 'num_igr_warm_start_iters': ParamType.INT, - 'alf_factor': ParamType.REAL, - 'igr_iter_solver': ParamType.INT, - 'igr_pres_lim': ParamType.LOG, - 'recon_type': ParamType.INT, - 'muscl_order': ParamType.INT, - 'muscl_lim': ParamType.INT, - 'int_comp': ParamType.LOG, - 'ic_eps': ParamType.REAL, - 'ic_beta': ParamType.REAL, - 'nv_uvm_out_of_core': ParamType.LOG, - 'nv_uvm_igr_temps_on_gpu': ParamType.INT, - 'nv_uvm_pref_gpu': ParamType.LOG, - 'fft_wrt': ParamType.LOG, -}) - -for var in [ 'heatTransfer_model', 'massTransfer_model', 'pressure_corrector', - 'write_bubbles', 'write_bubbles_stats' ]: - SIMULATION[f'lag_params%{var}'] = ParamType.LOG - -for var in [ 'solver_approach', 'cluster_type', 'smooth_type', 'nBubs_glb']: - SIMULATION[f'lag_params%{var}'] = ParamType.INT - -for var in [ 'epsilonb', 'valmaxvoid', 'charwidth']: - SIMULATION[f'lag_params%{var}'] = ParamType.REAL - -for var in [ 'diffusion', 'reactions' ]: - SIMULATION[f'chem_params%{var}'] = ParamType.LOG - -for var in [ 'gamma_method', 'transport_model']: - SIMULATION[f'chem_params%{var}'] = ParamType.INT - -for var in ["R0ref", "p0ref", "rho0ref", "T0ref", "ss", "pv", "vd", - "mu_l", "mu_v", "mu_g", "gam_v", "gam_g", - "M_v", "M_g", "k_v", "k_g", "cp_v", "cp_g", "R_v", "R_g" ]: - SIMULATION[f"bub_pp%{var}"] = ParamType.REAL - -for ib_id in range(1, 10+1): - for real_attr, ty in [("geometry", ParamType.INT), ("radius", ParamType.REAL), - ("theta", ParamType.REAL), ("slip", ParamType.LOG), - ("c", ParamType.REAL), ("p", ParamType.REAL), - ("t", ParamType.REAL), ("m", ParamType.REAL), - ("moving_ibm", ParamType.INT), ("mass", ParamType.REAL)]: - SIMULATION[f"patch_ib({ib_id})%{real_attr}"] = ty - - for dir_id in range(1, 
4): - SIMULATION[f"patch_ib({ib_id})%vel({dir_id})"] = ParamType.REAL.analytic() - SIMULATION[f"patch_ib({ib_id})%angles({dir_id})"] = ParamType.REAL - SIMULATION[f"patch_ib({ib_id})%angular_vel({dir_id})"] = ParamType.REAL.analytic() - - for cmp_id, cmp in enumerate(["x", "y", "z"]): - cmp_id += 1 - SIMULATION[f'patch_ib({ib_id})%{cmp}_centroid'] = ParamType.REAL - SIMULATION[f'patch_ib({ib_id})%length_{cmp}'] = ParamType.REAL - -for cmp in ["x", "y", "z"]: - SIMULATION[f'bc_{cmp}%beg'] = ParamType.INT - SIMULATION[f'bc_{cmp}%end'] = ParamType.INT - SIMULATION[f'bc_{cmp}%vb1'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%vb2'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%vb3'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%ve1'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%ve2'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%ve3'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%pres_in'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%pres_out'] = ParamType.REAL - SIMULATION[f'bc_{cmp}%grcbc_in'] = ParamType.LOG - SIMULATION[f'bc_{cmp}%grcbc_out'] = ParamType.LOG - SIMULATION[f'bc_{cmp}%grcbc_vel_out'] = ParamType.LOG - - for int_id in range(1, 10+1): - SIMULATION[f"bc_{cmp}%alpha_rho_in({int_id})"] = ParamType.REAL - SIMULATION[f"bc_{cmp}%alpha_in({int_id})"] = ParamType.REAL - - for int_id in range(1, 3+1): - SIMULATION[f"bc_{cmp}%vel_in({int_id})"] = ParamType.REAL - SIMULATION[f"bc_{cmp}%vel_out({int_id})"] = ParamType.REAL - - for var in ["k", "w", "p", "g"]: - SIMULATION[f'{var}_{cmp}'] = ParamType.REAL - SIMULATION[f'bf_{cmp}'] = ParamType.LOG - - - for prepend in ["domain%beg", "domain%end"]: - SIMULATION[f"{cmp}_{prepend}"] = ParamType.REAL - -for probe_id in range(1,10+1): - for cmp in ["x", "y", "z"]: - SIMULATION[f'probe({probe_id})%{cmp}'] = ParamType.REAL - -for f_id in range(1,10+1): - for real_attr in ["gamma", "pi_inf", "G", "cv", "qv", "qvp" ]: - SIMULATION[f"fluid_pp({f_id})%{real_attr}"] = ParamType.REAL - - for re_id in [1, 2]: - SIMULATION[f"fluid_pp({f_id})%Re({re_id})"] = ParamType.REAL - - for mono_id in range(1,4+1): - for int_attr in ["pulse", "support", "num_elements", "element_on", "bb_num_freq"]: - SIMULATION[f"acoustic({mono_id})%{int_attr}"] = ParamType.INT - - SIMULATION[f"acoustic({mono_id})%dipole"] = ParamType.LOG - - for real_attr in ["mag", "length", "height", "wavelength", "frequency", - "gauss_sigma_dist", "gauss_sigma_time", "npulse", - "dir", "delay", "foc_length", "aperture", - "element_spacing_angle", "element_polygon_ratio", - "rotate_angle", "bb_bandwidth", "bb_lowest_freq"]: - SIMULATION[f"acoustic({mono_id})%{real_attr}"] = ParamType.REAL - - for cmp_id in range(1,3+1): - SIMULATION[f"acoustic({mono_id})%loc({cmp_id})"] = ParamType.REAL - - for int_id in range(1,5+1): - for cmp in ["x", "y", "z"]: - SIMULATION[f"integral({int_id})%{cmp}min"] = ParamType.REAL - SIMULATION[f"integral({int_id})%{cmp}max"] = ParamType.REAL - -# Removed: 'fourier_modes%beg', 'fourier_modes%end'. -# Feel free to return them if they are needed once more. 
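Further down, the new `_is_param_valid_for_target` reduces an indexed parameter name to its bare namelist symbol before checking target membership. A standalone sketch of that rule, using the same regex as the diff (the `base_name` helper name is ours):

```python
import re

def base_name(param: str) -> str:
    # "patch_icpp(1)%geometry" -> "patch_icpp"; "fluid_pp(2)%gamma" -> "fluid_pp"
    match = re.match(r'^([a-zA-Z_][a-zA-Z0-9_]*)', param)
    return match.group(1) if match else param

assert base_name("acoustic(1)%loc(1)") == "acoustic"
assert base_name("bc_x%beg") == "bc_x"
assert base_name("dt") == "dt"
```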
-POST_PROCESS = COMMON.copy() -POST_PROCESS.update({ - 't_step_start': ParamType.INT, - 't_step_stop': ParamType.INT, - 't_step_save': ParamType.INT, - 'alt_soundspeed': ParamType.LOG, - 'mixture_err': ParamType.LOG, - 'format': ParamType.INT, - 'schlieren_wrt': ParamType.LOG, - 'schlieren_alpha': ParamType.REAL, - 'fd_order': ParamType.INT, - 'alpha_rho_wrt': ParamType.LOG, - 'rho_wrt': ParamType.LOG, - 'mom_wrt': ParamType.LOG, - 'vel_wrt': ParamType.LOG, - 'flux_lim': ParamType.INT, - 'flux_wrt': ParamType.LOG, - 'E_wrt': ParamType.LOG, - 'fft_wrt': ParamType.LOG, - 'pres_wrt': ParamType.LOG, - 'alpha_wrt': ParamType.LOG, - 'kappa_wrt': ParamType.LOG, - 'gamma_wrt': ParamType.LOG, - 'heat_ratio_wrt': ParamType.LOG, - 'pi_inf_wrt': ParamType.LOG, - 'pres_inf_wrt': ParamType.LOG, - 'cons_vars_wrt': ParamType.LOG, - 'prim_vars_wrt': ParamType.LOG, - 'c_wrt': ParamType.LOG, - 'omega_wrt': ParamType.LOG, - 'qbmm': ParamType.LOG, - 'qm_wrt': ParamType.LOG, - 'liutex_wrt': ParamType.LOG, - 'cf_wrt': ParamType.LOG, - 'sim_data': ParamType.LOG, - 'ib': ParamType.LOG, - 'num_ibs': ParamType.INT, - 'cfl_target': ParamType.REAL, - 't_save': ParamType.REAL, - 't_stop': ParamType.REAL, - 'n_start': ParamType.INT, - 'surface_tension': ParamType.LOG, - 'output_partial_domain': ParamType.LOG, - 'bubbles_lagrange': ParamType.LOG, - 'lag_header': ParamType.LOG, - 'lag_txt_wrt': ParamType.LOG, - 'lag_db_wrt': ParamType.LOG, - 'lag_id_wrt': ParamType.LOG, - 'lag_pos_wrt': ParamType.LOG, - 'lag_pos_prev_wrt': ParamType.LOG, - 'lag_vel_wrt': ParamType.LOG, - 'lag_rad_wrt': ParamType.LOG, - 'lag_rvel_wrt': ParamType.LOG, - 'lag_r0_wrt': ParamType.LOG, - 'lag_rmax_wrt': ParamType.LOG, - 'lag_rmin_wrt': ParamType.LOG, - 'lag_dphidt_wrt': ParamType.LOG, - 'lag_pres_wrt': ParamType.LOG, - 'lag_mv_wrt': ParamType.LOG, - 'lag_mg_wrt': ParamType.LOG, - 'lag_betaT_wrt': ParamType.LOG, - 'lag_betaC_wrt': ParamType.LOG, -}) - -for var in ["R0ref", "p0ref", "rho0ref", "T0ref", "ss", "pv", "vd", - "mu_l", "mu_v", "mu_g", "gam_v", "gam_g", - "M_v", "M_g", "k_v", "k_g", "cp_v", "cp_g", "R_v", "R_g" ]: - POST_PROCESS[f"bub_pp%{var}"] = ParamType.REAL - -for cmp in ["x", "y", "z"]: - for prepend in ["domain%beg", "domain%end", "a", "b"]: - PRE_PROCESS[f"{cmp}_{prepend}"] = ParamType.REAL - -for cmp_id in range(1,3+1): - cmp = ["x", "y", "z"][cmp_id-1] - - POST_PROCESS[f'bc_{cmp}%beg'] = ParamType.INT - POST_PROCESS[f'bc_{cmp}%end'] = ParamType.INT - - POST_PROCESS[f'{cmp}_output%beg'] = ParamType.REAL - POST_PROCESS[f'{cmp}_output%end'] = ParamType.REAL - - for real_attr in ["mom_wrt", "vel_wrt", "flux_wrt", "omega_wrt"]: - POST_PROCESS[f'{real_attr}({cmp_id})'] = ParamType.LOG - -for cmp_id in range(100): - POST_PROCESS[f'chem_wrt_Y({cmp_id})'] = ParamType.LOG -POST_PROCESS['chem_wrt_T'] = ParamType.LOG - -for fl_id in range(1,10+1): - for append, ty in [("schlieren_alpha", ParamType.REAL), ("alpha_rho_wrt", ParamType.LOG), - ("alpha_wrt", ParamType.LOG), ("kappa_wrt", ParamType.LOG), - ("alpha_rho_e_wrt", ParamType.LOG)]: - POST_PROCESS[f'{append}({fl_id})'] = ty - - for real_attr in ["gamma", "pi_inf", "G", "cv", "qv", "qvp" ]: - POST_PROCESS[f"fluid_pp({fl_id})%{real_attr}"] = ParamType.REAL + +def _load_all_params(): + """Load all parameters as {name: ParamType} dict.""" + from ..params import REGISTRY + return {name: param.param_type for name, param in REGISTRY.all_params.items()} + + +def _load_case_optimization_params(): + """Get params that can be hard-coded for GPU optimization.""" + from ..params import REGISTRY + 
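+    # Case-optimized (GPU) builds bake these parameters in as compile-time
+    # constants, so get_input_dict_keys() below drops them from the runtime
+    # input file when case optimization is requested.
+    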
return [name for name, param in REGISTRY.all_params.items() if param.case_optimization] + + +def _build_schema(): + """Build JSON schema from registry.""" + from ..params import REGISTRY + return REGISTRY.get_json_schema() + + +def _get_validator_func(): + """Get the cached validator from registry.""" + from ..params import REGISTRY + return REGISTRY.get_validator() + + +def _get_target_params(): + """Get valid params for each target by parsing Fortran namelists.""" + from ..params.namelist_parser import get_target_params + return get_target_params() + + +# Parameters to ignore during certain operations IGNORE = ["cantera_file", "chemistry"] -ALL = COMMON.copy() -ALL.update(PRE_PROCESS) -ALL.update(SIMULATION) -ALL.update(POST_PROCESS) +# Combined dict of all parameters +ALL = _load_all_params() + +# Parameters that can be hard-coded for GPU case optimization +CASE_OPTIMIZATION = _load_case_optimization_params() + +# JSON schema for validation +SCHEMA = _build_schema() + + +def _is_param_valid_for_target(param_name: str, target_name: str) -> bool: + """ + Check if a parameter is valid for a given target. -CASE_OPTIMIZATION = [ "mapped_weno", "wenoz", "teno", "wenoz_q", "nb", "weno_order", - "num_fluids", "mhd", "relativity", "igr_order", "viscous", - "igr_iter_solver", "igr", "igr_pres_lim", "recon_type", "muscl_order", "muscl_lim" ] + Uses the Fortran namelist definitions as the source of truth. + Handles indexed params like "patch_icpp(1)%geometry" by checking base name. -_properties = { k: v.value for k, v in ALL.items() } + Args: + param_name: The parameter name (may include indices) + target_name: One of 'pre_process', 'simulation', 'post_process' -SCHEMA = { - "type": "object", - "properties": _properties, - "additionalProperties": False -} + Returns: + True if the parameter is valid for the target + """ + target_params = _get_target_params().get(target_name, set()) + + # Extract base parameter name (before any index or attribute) + # e.g., "patch_icpp(1)%geometry" -> "patch_icpp" + # e.g., "fluid_pp(2)%gamma" -> "fluid_pp" + # e.g., "acoustic(1)%loc(1)" -> "acoustic" + match = re.match(r'^([a-zA-Z_][a-zA-Z0-9_]*)', param_name) + if match: + base_name = match.group(1) + return base_name in target_params + + return param_name in target_params def get_input_dict_keys(target_name: str) -> list: - result = { - "pre_process" : PRE_PROCESS, - "simulation" : SIMULATION, - "post_process" : POST_PROCESS - }.get(target_name, {}).keys() + """ + Get parameter keys for a given target. + + Uses the Fortran namelist definitions as the source of truth. + Only returns params whose base name is in the target's namelist. 
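+    Indexed names such as "fluid_pp(2)%gamma" are kept when their base name
+    ("fluid_pp") appears in the target's namelist.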
+ + Args: + target_name: One of 'pre_process', 'simulation', 'post_process' + + Returns: + List of parameter names valid for that target + """ + keys = [k for k in ALL.keys() if _is_param_valid_for_target(k, target_name)] - if not ARG("case_optimization") or target_name != "simulation": - return result + # Case optimization filtering for simulation + if ARG("case_optimization", dflt=False) and target_name == "simulation": + keys = [k for k in keys if k not in CASE_OPTIMIZATION] - return [ x for x in result if x not in CASE_OPTIMIZATION ] + return keys -@cache def get_validator(): - return fastjsonschema.compile(SCHEMA) + """Get the cached JSON schema validator.""" + return _get_validator_func() diff --git a/toolchain/mfc/run/input.py b/toolchain/mfc/run/input.py index 8a90b9c910..1d5e636961 100644 --- a/toolchain/mfc/run/input.py +++ b/toolchain/mfc/run/input.py @@ -1,10 +1,11 @@ import os, json, glob, typing, dataclasses -import pyrometheus as pyro -import cantera as ct +# Note: pyrometheus and cantera are imported lazily in the methods that need them +# to avoid slow startup times for commands that don't use chemistry features +# Note: build is imported lazily to avoid circular import with build.py from ..printer import cons -from .. import common, build +from .. import common from ..state import ARGS, ARG, gpuConfigOptions from ..case import Case from .. import case_validator @@ -20,6 +21,7 @@ def __init__(self, filename: str, dirpath: str, params: dict) -> None: self.dirpath = dirpath def generate_inp(self, target) -> None: + from .. import build # pylint: disable=import-outside-toplevel target = build.get_target(target) # Save .inp input file @@ -34,7 +36,10 @@ def __save_fpp(self, target, contents: str) -> None: cons.print("Writing a (new) custom case.fpp file.") common.file_write(fpp_path, contents, True) - def get_cantera_solution(self) -> ct.Solution: + def get_cantera_solution(self): + # Lazy import to avoid slow startup for commands that don't need chemistry + import cantera as ct # pylint: disable=import-outside-toplevel + if self.params.get("chemistry", 'F') == 'T': cantera_file = self.params["cantera_file"] @@ -58,6 +63,9 @@ def get_cantera_solution(self) -> ct.Solution: raise common.MFCException(f"Cantera file '{cantera_file}' not found. Searched: {', '.join(candidates)}.") def generate_fpp(self, target) -> None: + # Lazy import to avoid slow startup for commands that don't need chemistry + import pyrometheus as pyro # pylint: disable=import-outside-toplevel + if target.isDependency: return @@ -97,6 +105,7 @@ def generate_fpp(self, target) -> None: def validate_constraints(self, target) -> None: """Validate case parameter constraints for a given target stage""" + from .. import build # pylint: disable=import-outside-toplevel target_obj = build.get_target(target) stage = target_obj.name @@ -114,6 +123,7 @@ def generate(self, target) -> None: self.generate_fpp(target) def clean(self, _targets) -> None: + from .. import build # pylint: disable=import-outside-toplevel targets = [build.get_target(target) for target in _targets] files = set() @@ -181,8 +191,13 @@ def load(filepath: str = None, args: typing.List[str] = None, empty_data: dict = raise common.MFCException(f"Input file {filename} terminated with a non-zero exit code. 
Please make sure running the file doesn't produce any errors.") elif filename.endswith(".json"): json_str = common.file_read(filename) + elif filename.endswith((".yaml", ".yml")): + import yaml # pylint: disable=import-outside-toplevel + with open(filename, 'r') as f: + dictionary = yaml.safe_load(f) + json_str = json.dumps(dictionary) else: - raise common.MFCException("Unrecognized input file format. Only .py and .json files are supported. Please check the README and sample cases in the examples directory.") + raise common.MFCException("Unrecognized input file format. Supported: .py, .json, .yaml, .yml. Please check the README and sample cases in the examples directory.") try: dictionary = json.loads(json_str) diff --git a/toolchain/mfc/run/run.py b/toolchain/mfc/run/run.py index fc2366e497..95747c3500 100644 --- a/toolchain/mfc/run/run.py +++ b/toolchain/mfc/run/run.py @@ -144,7 +144,18 @@ def __execute_job_script(qsystem: queues.QueueSystem): # in the correct directory. cmd = qsystem.gen_submit_cmd(__job_script_filepath()) - if system(cmd, cwd=os.path.dirname(ARG("input"))).returncode != 0: + verbosity = ARG('verbose') + + # At verbosity >= 1, show the command being executed + if verbosity >= 1: + cons.print(f" [dim]$ {' '.join(str(c) for c in cmd)}[/dim]") + cons.print() + + # Execute the job script with appropriate output handling + # At verbosity >= 2, show print_cmd=True for system() calls + print_cmd = verbosity >= 2 + + if system(cmd, cwd=os.path.dirname(ARG("input")), print_cmd=print_cmd).returncode != 0: raise MFCException(f"Submitting batch file for {qsystem.name} failed. It can be found here: {__job_script_filepath()}. Please check the file for errors.") @@ -154,6 +165,8 @@ def run(targets = None, case = None): build(targets) + verbosity = ARG('verbose') + cons.print("[bold]Run[/bold]") cons.indent() @@ -166,14 +179,29 @@ def run(targets = None, case = None): qsystem = queues.get_system() cons.print(f"Using queue system [magenta]{qsystem.name}[/magenta].") + # At verbosity >= 1, show more details about what's happening + if verbosity >= 1: + cons.print(f" [dim]Targets: {', '.join(t.name for t in targets)}[/dim]") + cons.print(f" [dim]Input file: {ARG('input')}[/dim]") + if ARG("nodes") > 1 or ARG("tasks_per_node") > 1: + cons.print(f" [dim]MPI: {ARG('nodes')} nodes × {ARG('tasks_per_node')} tasks/node = {ARG('nodes') * ARG('tasks_per_node')} total ranks[/dim]") + __generate_job_script(targets, case) __validate_job_options() __generate_input_files(targets, case) + if verbosity >= 2: + cons.print(f" [dim]Job script: {__job_script_filepath()}[/dim]") + if not ARG("dry_run"): if ARG("output_summary") is not None: file_dump_yaml(ARG("output_summary"), { "invocation": sys.argv[1:], "lock": dataclasses.asdict(CFG()) }) + + if verbosity >= 1: + cons.print() + cons.print("[bold]Executing simulation...[/bold]") + __execute_job_script(qsystem) diff --git a/toolchain/mfc/test/test.py b/toolchain/mfc/test/test.py index 78d40d9eba..31a3771cb9 100644 --- a/toolchain/mfc/test/test.py +++ b/toolchain/mfc/test/test.py @@ -2,6 +2,7 @@ from random import sample, seed import rich, rich.table +from rich.panel import Panel from ..printer import cons from .. 
import common @@ -22,6 +23,8 @@ current_test_number = 0 total_test_count = 0 errors = [] +failed_tests = [] # Track failed test details for summary +test_start_time = None # Track overall test duration # Early abort thresholds MIN_CASES_BEFORE_ABORT = 20 @@ -107,10 +110,11 @@ def __filter(cases_) -> typing.List[TestCase]: return selected_cases, skipped_cases def test(): - # pylint: disable=global-statement, global-variable-not-assigned, too-many-statements + # pylint: disable=global-statement, global-variable-not-assigned, too-many-statements, too-many-locals global nFAIL, nPASS, nSKIP, total_test_count - global errors + global errors, failed_tests, test_start_time + test_start_time = time.time() # Start timing cases = list_cases() # Delete UUIDs that are not in the list of cases from tests/ @@ -164,7 +168,7 @@ def test(): # Run cases with multiple threads (if available) cons.print() - cons.print(" Progress [bold magenta]UUID[/bold magenta] (s) Summary") + cons.print(" Progress Test Name Time(s) UUID") cons.print() # Select the correct number of threads to use to launch test cases @@ -193,23 +197,88 @@ def test(): nSKIP = len(skipped_cases) cons.print() cons.unindent() - cons.print(f"\nTest Summary: [bold green]{nPASS}[/bold green] passed, [bold red]{nFAIL}[/bold red] failed, [bold yellow]{nSKIP}[/bold yellow] skipped.\n") - # Print a summary of all errors at the end if errors exist - if len(errors) != 0: - cons.print(f"[bold red]Failed Cases[/bold red]\n") - for e in errors: - cons.print(e) + # Calculate total test duration + total_duration = time.time() - test_start_time + minutes = int(total_duration // 60) + seconds = total_duration % 60 - # Print the list of skipped cases - if len(skipped_cases) != 0: - cons.print("[bold yellow]Skipped Cases[/bold yellow]\n") - for c in skipped_cases: - cons.print(f"[bold yellow]{c.trace}[/bold yellow]") + # Build the summary report + _print_test_summary(nPASS, nFAIL, nSKIP, minutes, seconds, failed_tests, skipped_cases) exit(nFAIL) +def _print_test_summary(passed: int, failed: int, skipped: int, minutes: int, seconds: float, + failed_test_list: list, _skipped_cases: list): + # pylint: disable=too-many-arguments, too-many-positional-arguments, too-many-locals + """Print a comprehensive test summary report.""" + total = passed + failed + skipped + + # Build summary header + if failed == 0: + status_icon = "[bold green]✓[/bold green]" + status_text = "[bold green]ALL TESTS PASSED[/bold green]" + border_style = "green" + else: + status_icon = "[bold red]✗[/bold red]" + status_text = f"[bold red]{failed} TEST{'S' if failed != 1 else ''} FAILED[/bold red]" + border_style = "red" + + # Format time string + if minutes > 0: + time_str = f"{minutes}m {seconds:.1f}s" + else: + time_str = f"{seconds:.1f}s" + + # Build summary content + summary_lines = [ + f"{status_icon} {status_text}", + "", + f" [bold green]{passed:4d}[/bold green] passed", + f" [bold red]{failed:4d}[/bold red] failed", + f" [bold yellow]{skipped:4d}[/bold yellow] skipped", + f" [dim]{'─' * 12}[/dim]", + f" [bold]{total:4d}[/bold] total", + "", + f" [dim]Time: {time_str}[/dim]", + ] + + # Add failed tests details if any + if failed_test_list: + summary_lines.append("") + summary_lines.append(" [bold red]Failed Tests:[/bold red]") + for test_info in failed_test_list[:10]: # Limit to first 10 + trace = test_info.get('trace', 'Unknown') + uuid = test_info.get('uuid', 'Unknown') + error_type = test_info.get('error_type', '') + if len(trace) > 40: + trace = trace[:37] + "..." 
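+            # One bullet per failure: truncated trace, then UUID and a coarse error class.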
+            summary_lines.append(f"    [red]•[/red] {trace}")
+            summary_lines.append(f"      [dim]UUID: {uuid}[/dim]")
+            if error_type:
+                summary_lines.append(f"      [dim]({error_type})[/dim]")
+        if len(failed_test_list) > 10:
+            summary_lines.append(f"    [dim]... and {len(failed_test_list) - 10} more[/dim]")
+
+    # Add next steps for failures
+    if failed > 0:
+        summary_lines.append("")
+        summary_lines.append("  [bold]Next Steps:[/bold]")
+        summary_lines.append("    • Run with [cyan]--generate[/cyan] to update golden files (if changes are intentional)")
+        summary_lines.append("    • Check individual test output in [cyan]tests/<UUID>/[/cyan]")
+        summary_lines.append("    • Run specific test: [cyan]./mfc.sh test --only <UUID>[/cyan]")
+
+    cons.print()
+    cons.raw.print(Panel(
+        "\n".join(summary_lines),
+        title="[bold]Test Summary[/bold]",
+        border_style=border_style,
+        padding=(1, 2)
+    ))
+    cons.print()
+
 # pylint: disable=too-many-locals, too-many-branches, too-many-statements, trailing-whitespace
 def _process_silo_file(silo_filepath: str, case: TestCase, out_filepath: str):
     """Process a single silo file with h5dump and check for NaNs/Infinities."""
@@ -259,7 +328,9 @@ def _handle_case(case: TestCase, devices: typing.Set[int]):
     case.create_directory()
 
     if ARG("dry_run"):
-        cons.print(f"  [bold magenta]{case.get_uuid()}[/bold magenta]  SKIP  {case.trace}")
+        # Truncate long traces for readability
+        trace_display = case.trace if len(case.trace) <= 50 else case.trace[:47] + "..."
+        cons.print(f"  (dry-run)  {trace_display:50s}  SKIP  [magenta]{case.get_uuid()}[/magenta]")
         timeout_timer.cancel()
         return
@@ -335,7 +406,9 @@ def _handle_case(case: TestCase, devices: typing.Set[int]):
         current_test_number += 1
         progress_str = f"({current_test_number:3d}/{total_test_count:3d})"
-        cons.print(f"  {progress_str}  [bold magenta]{case.get_uuid()}[/bold magenta]  {duration:6.2f}  {case.trace}")
+        # Truncate long traces for readability, showing test name prominently
+        trace_display = case.trace if len(case.trace) <= 50 else case.trace[:47] + "..."
+        cons.print(f"  {progress_str}  {trace_display:50s}  {duration:6.2f}  [magenta]{case.get_uuid()}[/magenta]")
 
     except TestTimeoutError as exc:
         log_path = os.path.join(case.get_dirpath(), 'out_pre_sim.txt')
@@ -357,7 +430,7 @@ def _handle_case(case: TestCase, devices: typing.Set[int]):
 
 def handle_case(case: TestCase, devices: typing.Set[int]):
     # pylint: disable=global-statement, global-variable-not-assigned
     global nFAIL, nPASS, nSKIP
-    global errors
+    global errors, failed_tests
 
     # Check if we should abort before processing this case
     if abort_tests.is_set():
@@ -382,7 +455,52 @@ def handle_case(case: TestCase, devices: typing.Set[int]):
             if nAttempts < max_attempts:
                 continue
             nFAIL += 1
-            cons.print(f"[bold red]Failed test {case} after {nAttempts} attempt(s).[/bold red]")
+
+            # Enhanced real-time failure feedback
+            trace_display = case.trace if len(case.trace) <= 50 else case.trace[:47] + "..."
+            cons.print()
+            cons.print(f"  [bold red]✗ FAILED:[/bold red] {trace_display}")
+            cons.print(f"    UUID:     [magenta]{case.get_uuid()}[/magenta]")
+            cons.print(f"    Attempts: {nAttempts}")
+
+            # Show truncated error message
+            exc_str = str(exc)
+            if len(exc_str) > 300:
+                exc_str = exc_str[:297] + "..."
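+            # Print a bounded excerpt here; the complete error text is still appended to errors below.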
+ cons.print(f" Error: {exc_str}") + + # Provide helpful hints based on error type + exc_lower = str(exc).lower() + if "tolerance" in exc_lower or "golden" in exc_lower or "mismatch" in exc_lower: + cons.print(f" [dim]Hint: Consider --generate to update golden files or check tolerances[/dim]") + elif "timeout" in exc_lower: + cons.print(f" [dim]Hint: Test may be hanging - check case configuration[/dim]") + elif "nan" in exc_lower: + cons.print(f" [dim]Hint: NaN detected - check numerical stability of the case[/dim]") + elif "failed to execute" in exc_lower: + cons.print(f" [dim]Hint: Check build logs and case parameters[/dim]") + cons.print() + + # Track failed test details for summary + error_type = "" + exc_lower = str(exc).lower() + if "tolerance" in exc_lower or "golden" in exc_lower or "mismatch" in exc_lower: + error_type = "tolerance mismatch" + elif "timeout" in exc_lower: + error_type = "timeout" + elif "nan" in exc_lower: + error_type = "NaN detected" + elif "failed to execute" in exc_lower: + error_type = "execution failed" + + failed_tests.append({ + 'trace': case.trace, + 'uuid': case.get_uuid(), + 'error_type': error_type, + 'attempts': nAttempts + }) + + # Still collect for final summary errors.append(f"[bold red]Failed test {case} after {nAttempts} attempt(s).[/bold red]") errors.append(f"{exc}") diff --git a/toolchain/mfc/user_guide.py b/toolchain/mfc/user_guide.py new file mode 100644 index 0000000000..d83597e98d --- /dev/null +++ b/toolchain/mfc/user_guide.py @@ -0,0 +1,748 @@ +""" +User guide, help, tips, and onboarding for MFC toolchain. + +This module provides: +- Enhanced help output with Rich formatting +- Contextual tips after errors/failures +- Interactive mode with menu +- Onboarding for new users +- Topic-based help system +""" + +import os +import subprocess +import re + +from rich.panel import Panel +from rich.table import Table +from rich.prompt import Prompt +from rich.markdown import Markdown +from rich import box + +from .printer import cons +from .common import MFC_ROOT_DIR + +# Import command definitions from CLI schema (SINGLE SOURCE OF TRUTH) +from .cli.commands import COMMANDS + + +# ============================================================================= +# DYNAMIC CLUSTER HELP GENERATION +# ============================================================================= + +# Organization mapping based on system name prefixes and known clusters +CLUSTER_ORGS = { + "OLCF": "ORNL", + "LLNL": "LLNL", + "PSC": "ACCESS", + "SDSC": "ACCESS", + "NCSA": "ACCESS", + "GT": "Georgia Tech", + "Brown": "Brown", + "DoD": "DoD", + "Richardson": "Caltech", + "hipergator": "Florida", + "CSCS": "CSCS", +} + +# Explicit slug-to-org overrides (for cases where modules file naming is inconsistent) +SLUG_ORG_OVERRIDE = { + "tuo": "LLNL", # Tuolumne is at LLNL, not ORNL (modules file says "OLCF" incorrectly) +} + +# Display name overrides for clusters +SLUG_NAME_OVERRIDE = { + "h": "HiPerGator", # Proper capitalization +} + +# Display order and colors for organizations +ORG_ORDER = ["ORNL", "LLNL", "ACCESS", "Georgia Tech", "Caltech", "Brown", "DoD", "Florida", "CSCS"] +ORG_COLORS = { + "ORNL": "yellow", + "LLNL": "yellow", + "ACCESS": "yellow", + "Georgia Tech": "yellow", + "Caltech": "yellow", + "Brown": "yellow", + "DoD": "yellow", + "Florida": "yellow", +} + + +def _parse_modules_file(): + """Parse the modules file to extract cluster information. 
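+    A cluster definition line is assumed to look like "<slug> <System Name>",
+    e.g. "f OLCF Frontier"; module-list lines (suffixed -all/-cpu/-gpu) are skipped.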
+
+    Returns a dict: {slug: {"name": full_name, "org": organization}}
+    """
+    modules_path = os.path.join(MFC_ROOT_DIR, "toolchain", "modules")
+    clusters = {}
+
+    try:
+        with open(modules_path, "r", encoding="utf-8") as f:
+            for line in f:
+                line = line.strip()
+                # Skip comments and empty lines
+                if not line or line.startswith("#"):
+                    continue
+                # Skip lines with -all, -cpu, -gpu (module definitions)
+                if "-all" in line or "-cpu" in line or "-gpu" in line:
+                    continue
+
+                # Parse cluster definition lines: "slug System Name"
+                match = re.match(r'^(\S+)\s+(.+)$', line)
+                if match:
+                    slug = match.group(1)
+                    full_name = match.group(2).strip()
+
+                    # Check for explicit org override first
+                    if slug in SLUG_ORG_OVERRIDE:
+                        org = SLUG_ORG_OVERRIDE[slug]
+                    else:
+                        # Determine organization from name
+                        org = "Other"
+                        for prefix, org_name in CLUSTER_ORGS.items():
+                            if prefix in full_name or full_name.lower() == prefix.lower():
+                                org = org_name
+                                break
+
+                    clusters[slug] = {"name": full_name, "org": org}
+    except FileNotFoundError:
+        # Fallback if modules file not found
+        pass
+
+    return clusters
+
+
+def _get_cluster_short_name(slug, full_name):
+    """Get display name for a cluster, with overrides and prefix stripping."""
+    if slug in SLUG_NAME_OVERRIDE:
+        return SLUG_NAME_OVERRIDE[slug]
+    # Strip org prefix if present
+    for prefix in CLUSTER_ORGS:
+        if full_name.startswith(prefix + " "):
+            return full_name[len(prefix) + 1:]
+    return full_name
+
+
+def _generate_clusters_content():
+    """Generate the clusters help content dynamically from modules file."""
+    clusters = _parse_modules_file()
+
+    # Group clusters by organization
+    org_clusters = {org: [] for org in ORG_ORDER}
+    org_clusters["Other"] = []
+
+    for slug, info in clusters.items():
+        org = info["org"]
+        if org not in org_clusters:
+            org_clusters["Other"].append((slug, info["name"]))
+        else:
+            org_clusters[org].append((slug, info["name"]))
+
+    # Build the cluster list section
+    cluster_lines = []
+    for org in ORG_ORDER:
+        if not org_clusters.get(org):
+            continue
+        # Format: "  [yellow]ORG:[/yellow] [cyan]slug[/cyan]=Name [cyan]slug2[/cyan]=Name2"
+        entries = [
+            f"[cyan]{slug}[/cyan]={_get_cluster_short_name(slug, name)}"
+            for slug, name in org_clusters[org]
+        ]
+        color = ORG_COLORS.get(org, 'yellow')
+        cluster_lines.append(f"  [{color}]{org}:[/{color}] " + "  ".join(entries))
+
+    # Handle "Other" if any
+    if org_clusters.get("Other"):
+        entries = [f"[cyan]{slug}[/cyan]={name}" for slug, name in org_clusters["Other"]]
+        cluster_lines.append(f"  [yellow]Other:[/yellow] " + "  ".join(entries))
+
+    cluster_list = "\n".join(cluster_lines) if cluster_lines else "  [dim]No clusters found in modules file[/dim]"
+
+    # Return full help content with dynamic cluster list
+    return f"""\
+[bold cyan]Supported HPC Clusters[/bold cyan]
+
+MFC includes pre-configured module sets for many clusters.
+
+[bold]Loading Cluster Modules:[/bold]
+  [green]source ./mfc.sh load -c <cluster> -m <mode>[/green]
+
+[bold]Available Clusters:[/bold]
+{cluster_list}
+
+[bold]Modes:[/bold]
+  [cyan]c[/cyan] or [cyan]cpu[/cyan] - CPU only
+  [cyan]g[/cyan] or [cyan]gpu[/cyan] - GPU enabled
+
+[bold]Examples:[/bold]
+  [green]source ./mfc.sh load -c p -m g[/green]   Phoenix with GPU
+  [green]source ./mfc.sh load -c f -m g[/green]   Frontier with GPU (AMD MI250X)
+  [green]source ./mfc.sh load -c d -m c[/green]   Delta CPU-only
+
+[bold]Custom Clusters:[/bold]
+  For unlisted clusters, manually load:
+  • Fortran compiler (gfortran, nvfortran, amdflang, etc.)
+ • MPI implementation (OpenMPI, MPICH, Cray-MPICH) + • CMake 3.18+, Python 3.11+""" + + +# ============================================================================= +# MARKDOWN-BASED HELP (Single source of truth from docs/) +# ============================================================================= + +# Mapping of help topics to their source markdown files and optional section +# Format: {"topic": ("file_path", "section_heading" or None for full file)} +MARKDOWN_HELP_FILES = { + "debugging": ("docs/documentation/troubleshooting.md", None), # Full file + "gpu": ("docs/documentation/running.md", "Running on GPUs"), # Section only + "batch": ("docs/documentation/running.md", "Batch Execution"), # Section only + "performance": ("docs/documentation/expectedPerformance.md", "Achieving Maximum Performance"), +} + + +def _extract_markdown_section(content: str, section_heading: str) -> str: + """Extract a specific section from markdown content. + + Extracts from the given heading until the next heading of same or higher level, + or until a horizontal rule (---). + """ + # Find the section heading (## or ###) + # Note: In f-strings, literal braces must be doubled: {{1,3}} -> {1,3} + pattern = rf'^(#{{1,3}})\s+{re.escape(section_heading)}\s*$' + match = re.search(pattern, content, re.MULTILINE) + if not match: + return None + + start_pos = match.end() + + # Find the end: horizontal rule (---) which separates major sections + # Note: We use --- instead of heading detection because shell comments + # inside code blocks (# comment) look like markdown headings to regex + end_pattern = r'^---' + end_match = re.search(end_pattern, content[start_pos:], re.MULTILINE) + + if end_match: + section = content[start_pos:start_pos + end_match.start()] + else: + section = content[start_pos:] + + return section.strip() + + +def _load_markdown_help(topic: str) -> str: + """Load help content from a markdown file. + + Can load full file or extract a specific section. + Strips Doxygen-specific syntax and returns clean markdown. 
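+    Returns None when the topic is unknown, the file is missing, or the
+    requested section cannot be found.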
+    """
+    if topic not in MARKDOWN_HELP_FILES:
+        return None
+
+    file_path, section = MARKDOWN_HELP_FILES[topic]
+    filepath = os.path.join(MFC_ROOT_DIR, file_path)
+
+    try:
+        with open(filepath, "r", encoding="utf-8") as f:
+            content = f.read()
+    except FileNotFoundError:
+        return None
+
+    # Extract section if specified
+    if section:
+        content = _extract_markdown_section(content, section)
+        if content is None:
+            return None
+
+    # Strip Doxygen-specific syntax
+    # Remove @page directives
+    content = re.sub(r'^@page\s+\S+\s+.*$', '', content, flags=re.MULTILINE)
+    # Remove @ref, @see directives (but keep the text after them readable)
+    content = re.sub(r'@(ref|see)\s+"([^"]+)"', r'\2', content)  # @ref "Text" -> Text
+    content = re.sub(r'@(ref|see)\s+(\S+)', '', content)  # @ref name -> (remove)
+    # Clean up any resulting empty lines at the start
+    content = content.lstrip('\n')
+
+    return content
+
+
+def _generate_markdown_help(topic: str):
+    """Generate a function that loads markdown help for a topic."""
+    def loader():
+        return _load_markdown_help(topic)
+    return loader
+
+
+# =============================================================================
+# HELP TOPICS
+# =============================================================================
+
+HELP_TOPICS = {
+    "gpu": {
+        "title": "Running on GPUs",
+        # Content loaded from docs/documentation/running.md "Running on GPUs" section
+        "content": _generate_markdown_help("gpu"),
+        "markdown": True,
+    },
+    "clusters": {
+        "title": "Cluster Configuration",
+        # Content is generated dynamically from toolchain/modules file
+        "content": _generate_clusters_content,
+    },
+    "batch": {
+        "title": "Batch Job Submission",
+        # Content loaded from docs/documentation/running.md "Batch Execution" section
+        "content": _generate_markdown_help("batch"),
+        "markdown": True,
+    },
+    "debugging": {
+        "title": "Debugging & Troubleshooting",
+        # Content loaded from docs/documentation/troubleshooting.md
+        "content": _generate_markdown_help("debugging"),
+        "markdown": True,
+    },
+    "performance": {
+        "title": "Performance Optimization",
+        # Content loaded from docs/documentation/expectedPerformance.md "Achieving Maximum Performance" section
+        "content": _generate_markdown_help("performance"),
+        "markdown": True,
+    },
+}
+
+
+def print_topic_help(topic: str):
+    """Print help for a specific topic."""
+    if topic not in HELP_TOPICS:
+        cons.print(f"[red]Unknown topic: {topic}[/red]")
+        cons.print()
+        cons.print("[bold]Available topics:[/bold]")
+        for t, info in HELP_TOPICS.items():
+            cons.print(f"  [green]{t:12}[/green] {info['title']}")
+        cons.print()
+        cons.print("[dim]Usage: ./mfc.sh help <topic>[/dim]")
+        return
+
+    topic_info = HELP_TOPICS[topic]
+    # Support callable content for dynamic generation
+    content = topic_info["content"]
+    if callable(content):
+        content = content()
+
+    if content is None:
+        cons.print(f"[red]Could not load help for topic: {topic}[/red]")
+        return
+
+    cons.print()
+
+    # Check if content should be rendered as markdown
+    if topic_info.get("markdown", False):
+        # Render markdown content directly (no panel - markdown has its own formatting)
+        cons.print(f"[bold cyan]{topic_info['title']}[/bold cyan]")
+        cons.print()
+        cons.raw.print(Markdown(content))
+    else:
+        # Render as Rich markup in a panel
+        cons.raw.print(Panel(
+            content,
+            title=f"[bold]{topic_info['title']}[/bold]",
+            box=box.ROUNDED,
+            padding=(1, 2)
+        ))
+    cons.print()
+
+
+def print_help_topics():
+    """Print list of available help topics."""
+    cons.print()
+    cons.raw.print(Panel(
+        "[bold cyan]MFC Help System[/bold cyan]",
+        box=box.ROUNDED,
+        padding=(0, 2)
+    ))
+    cons.print()
+
+    table = Table(box=box.SIMPLE, show_header=False, padding=(0, 2))
+    table.add_column("Topic", style="green")
+    table.add_column("Description")
+
+    for topic, info in HELP_TOPICS.items():
+        table.add_row(topic, info["title"])
+
+    cons.raw.print(table)
+    cons.print()
+    cons.print("[dim]Usage: [cyan]./mfc.sh help <topic>[/cyan][/dim]")
+    cons.print("[dim]Example: [cyan]./mfc.sh help gpu[/cyan][/dim]")
+    cons.print()
+
+
+# =============================================================================
+# ENHANCED HELP OUTPUT
+# =============================================================================
+
+def print_help():
+    """Print enhanced, colorized help overview."""
+
+    # Header
+    cons.print()
+    cons.raw.print(Panel(
+        "[bold cyan]MFC[/bold cyan] - [dim]Multi-component Flow Code[/dim]\n"
+        "[dim]Exascale CFD solver for compressible multi-phase flows[/dim]",
+        box=box.ROUNDED,
+        padding=(0, 2)
+    ))
+    cons.print()
+
+    # Commands table
+    table = Table(
+        title="[bold]Commands[/bold]",
+        box=box.SIMPLE,
+        show_header=True,
+        header_style="bold cyan",
+        title_justify="left",
+        padding=(0, 2)
+    )
+    table.add_column("Command", style="green", no_wrap=True)
+    table.add_column("Alias", style="dim", no_wrap=True)
+    table.add_column("Description", style="white")
+
+    # Primary commands with aliases
+    for cmd in ["build", "run", "test", "validate", "new", "clean"]:
+        alias = COMMANDS[cmd].get("alias", "")
+        alias_str = alias if alias else ""
+        table.add_row(cmd, alias_str, COMMANDS[cmd]["description"])
+
+    table.add_row("", "", "")  # Spacer
+
+    # Secondary commands
+    for cmd in ["params", "count", "packer", "load"]:
+        table.add_row(f"[dim]{cmd}[/dim]", "", f"[dim]{COMMANDS[cmd]['description']}[/dim]")
+
+    table.add_row("", "", "")  # Spacer
+    table.add_row("[dim]help[/dim]", "", "[dim]Show help on a topic (gpu, clusters, batch, debugging)[/dim]")
+
+    cons.raw.print(table)
+    cons.print()
+
+    # Quick start
+    cons.raw.print(Panel(
+        "[bold]Quick Start[/bold]\n\n"
+        "  [green]1.[/green] [cyan]./mfc.sh new my_case[/cyan]              Create a new case\n"
+        "  [green]2.[/green] [cyan]vim my_case/case.py[/cyan]               Edit parameters\n"
+        "  [green]3.[/green] [cyan]./mfc.sh validate my_case/case.py[/cyan] Check for errors\n"
+        "  [green]4.[/green] [cyan]./mfc.sh build -j $(nproc)[/cyan]        Build MFC\n"
+        "  [green]5.[/green] [cyan]./mfc.sh run my_case/case.py[/cyan]      Run simulation",
+        box=box.ROUNDED,
+        border_style="green",
+        padding=(1, 2)
+    ))
+    cons.print()
+
+    # Footer
+    cons.print("[dim]Run [cyan]./mfc.sh --help[/cyan] for detailed options[/dim]")
+    cons.print("[dim]Run [cyan]./mfc.sh help <topic>[/cyan] for topic help (gpu, clusters, batch, debugging)[/dim]")
+    cons.print()
+
+
+def print_command_help(command: str, show_argparse: bool = True):
+    """Print enhanced help for a specific command."""
+    if command not in COMMANDS:
+        cons.print(f"[red]Unknown command: {command}[/red]")
+        return False
+
+    cmd = COMMANDS[command]
+    alias = cmd.get("alias", "")
+    alias_str = f" [dim](alias: {alias})[/dim]" if alias else ""
+
+    # Header panel
+    cons.print()
+    cons.raw.print(Panel(
+        f"[bold cyan]{command}[/bold cyan]{alias_str}\n"
+        f"[dim]{cmd['description']}[/dim]",
+        box=box.ROUNDED,
+        padding=(0, 2)
+    ))
+    cons.print()
+
+    # Examples
+    if cmd.get("examples"):
+        cons.print("[bold]Examples:[/bold]")
+        for example, desc in cmd["examples"]:
+            cons.print(f"  [green]{example}[/green]")
+            cons.print(f"    [dim]{desc}[/dim]")
+        cons.print()
+
+    # Key options
+    if cmd.get("key_options"):
+        cons.print("[bold]Key Options:[/bold]")
+        for opt, desc in cmd["key_options"]:
+            cons.print(f"  [cyan]{opt:24}[/cyan] {desc}")
+        cons.print()
+    if show_argparse:
+        cons.print("[dim]Run with --help for full option list[/dim]")
+        cons.print()
+
+    return True
+
+
+# =============================================================================
+# CONTEXTUAL TIPS
+# =============================================================================
+
+class Tips:
+    """Contextual tips shown after various events."""
+
+    @staticmethod
+    def after_build_failure():
+        """Show tips after a build failure."""
+        cons.print()
+        cons.raw.print(Panel(
+            "[bold yellow]Troubleshooting Tips[/bold yellow]\n\n"
+            "  [cyan]1.[/cyan] Run with [green]--debug-log[/green] to see detailed output\n"
+            "  [cyan]2.[/cyan] Check [green]docs/documentation/troubleshooting.md[/green]\n"
+            "  [cyan]3.[/cyan] Ensure required modules are loaded: [green]source ./mfc.sh load -c <cluster> -m <mode>[/green]\n"
+            "  [cyan]4.[/cyan] Try [green]./mfc.sh clean[/green] and rebuild",
+            box=box.ROUNDED,
+            border_style="yellow",
+            padding=(0, 2)
+        ))
+
+    @staticmethod
+    def after_case_error(case_path: str = None):
+        """Show tips after a case file error."""
+        msg = "[bold yellow]Tip[/bold yellow]\n\n"
+        if case_path:
+            msg += f"  Run [green]./mfc.sh validate {case_path}[/green] to check your case file for errors"
+        else:
+            msg += "  Run [green]./mfc.sh validate <case.py>[/green] to check your case file for errors"
+
+        cons.print()
+        cons.raw.print(Panel(msg, box=box.ROUNDED, border_style="yellow", padding=(0, 2)))
+
+    @staticmethod
+    def after_test_failure(failed_uuids: list = None):
+        """Show tips after test failures."""
+        lines = [
+            "[bold yellow]Next Steps[/bold yellow]\n",
+            "  [cyan]1.[/cyan] Check individual test output in [green]tests/<UUID>/[/green]",
+            "  [cyan]2.[/cyan] Run specific test: [green]./mfc.sh test --only <UUID>[/green]",
+            "  [cyan]3.[/cyan] Update golden files (if changes are intentional): [green]./mfc.sh test --generate[/green]",
+        ]
+
+        if failed_uuids and len(failed_uuids) <= 3:
+            lines.append("")
+            lines.append("  [bold]Failed tests:[/bold]")
+            for uuid in failed_uuids:
+                lines.append(f"    [red]•[/red] {uuid}")
+
+        cons.print()
+        cons.raw.print(Panel("\n".join(lines), box=box.ROUNDED, border_style="yellow", padding=(0, 2)))
+
+    @staticmethod
+    def after_run_failure():
+        """Show tips after a run failure."""
+        cons.print()
+        cons.raw.print(Panel(
+            "[bold yellow]Troubleshooting Tips[/bold yellow]\n\n"
+            "  [cyan]1.[/cyan] Validate your case: [green]./mfc.sh validate case.py[/green]\n"
+            "  [cyan]2.[/cyan] Check the output in [green]<case_dir>/[/green]\n"
+            "  [cyan]3.[/cyan] Run with [green]--debug-log[/green] for more details\n"
+            "  [cyan]4.[/cyan] Check MFC documentation: [green]docs/[/green]",
+            box=box.ROUNDED,
+            border_style="yellow",
+            padding=(0, 2)
+        ))
+
+    @staticmethod
+    def suggest_validate():
+        """Generic suggestion to use validate."""
+        cons.print()
+        cons.print("[dim]Tip: Run [cyan]./mfc.sh validate case.py[/cyan] to check for errors before running[/dim]")
+
+
+# =============================================================================
+# ONBOARDING FOR NEW USERS
+# =============================================================================
+
+def is_first_time_user() -> bool:
+    """Check if this is a first-time user (no build directory)."""
+    build_dir = os.path.join(MFC_ROOT_DIR, "build")
+    return not os.path.exists(build_dir)
+
+
+def print_welcome():
+    """Print welcome message for new users."""
+    cons.print()
+    cons.raw.print(Panel(
+        "[bold cyan]Welcome to MFC![/bold cyan]\n\n"
+        "It looks like this is your first time using MFC. Here's how to get started:\n\n"
+        "  [green]1.[/green] [bold]Load environment[/bold] (HPC clusters):\n"
+        "     [cyan]source ./mfc.sh load -c <cluster> -m <mode>[/cyan]\n"
+        "     Example: [dim]source ./mfc.sh load -c p -m g[/dim] (Phoenix, GPU)\n\n"
+        "  [green]2.[/green] [bold]Create a new case[/bold]:\n"
+        "     [cyan]./mfc.sh new my_first_case[/cyan]\n\n"
+        "  [green]3.[/green] [bold]Build MFC[/bold]:\n"
+        "     [cyan]./mfc.sh build -j $(nproc)[/cyan]\n\n"
+        "  [green]4.[/green] [bold]Run your simulation[/bold]:\n"
+        "     [cyan]./mfc.sh run my_first_case/case.py[/cyan]\n\n"
+        "[bold yellow]Optional:[/bold yellow] Enable tab completion for your shell:\n"
+        "  [cyan]./mfc.sh completion install[/cyan]\n\n"
+        "[dim]Run [cyan]./mfc.sh --help[/cyan] for all available commands[/dim]\n"
+        "[dim]Run [cyan]./mfc.sh interactive[/cyan] for a guided menu[/dim]",
+        title="[bold]Getting Started[/bold]",
+        box=box.DOUBLE,
+        border_style="cyan",
+        padding=(1, 2)
+    ))
+    cons.print()
+
+
+# =============================================================================
+# INTERACTIVE MODE
+# =============================================================================
+
+def interactive_mode():
+    """Run interactive menu-driven interface."""
+
+    while True:
+        cons.print()
+        cons.raw.print(Panel(
+            "[bold cyan]MFC Interactive Mode[/bold cyan]",
+            box=box.ROUNDED,
+            padding=(0, 2)
+        ))
+        cons.print()
+
+        # Menu options
+        options = [
+            ("1", "Create a new case", "new"),
+            ("2", "Validate a case file", "validate"),
+            ("3", "Build MFC", "build"),
+            ("4", "Run a simulation", "run"),
+            ("5", "Run tests", "test"),
+            ("6", "Clean build files", "clean"),
+            ("7", "Show help", "help"),
+            ("q", "Quit", None),
+        ]
+
+        for key, label, _ in options:
+            if key == "q":
+                cons.print(f"  [red]{key}[/red]) {label}")
+            else:
+                cons.print(f"  [green]{key}[/green]) {label}")
+
+        cons.print()
+        choice = Prompt.ask("[bold]Select an option[/bold]", choices=[o[0] for o in options], default="q")
+
+        if choice == "q":
+            cons.print("[dim]Goodbye![/dim]")
+            break
+
+        if choice == "7":
+            print_help()
+            continue
+
+        # Get the command for the selected option
+        cmd = next((o[2] for o in options if o[0] == choice), None)
+        if cmd is None:
+            continue
+
+        cons.print()
+
+        # Dispatch to handler
+        handlers = {
+            "new": _interactive_new,
+            "validate": _interactive_validate,
+            "build": _interactive_build,
+            "run": _interactive_run,
+            "test": _interactive_test,
+            "clean": _interactive_clean,
+        }
+        if cmd in handlers:
+            handlers[cmd]()
+
+
+def _run_mfc_command(args: list):
+    """Run an MFC command safely using subprocess."""
+    cmd_str = " ".join(args)
+    cons.print()
+    cons.print(f"[dim]Running: {cmd_str}[/dim]")
+    cons.print()
+    try:
+        subprocess.run(args, check=False)
+    except FileNotFoundError:
+        cons.print(f"[red]Command not found: {args[0]}[/red]")
+
+
+def _interactive_new():
+    """Interactive case creation."""
+    cons.print("[bold]Create a New Case[/bold]")
+    cons.print()
+
+    # Show templates
+    cons.print("Available templates: [cyan]1D_minimal[/cyan], [cyan]2D_minimal[/cyan], [cyan]3D_minimal[/cyan]")
+    cons.print("[dim]Or use 'example:<name>' to copy from examples[/dim]")
+    cons.print()
+
+    name = Prompt.ask("Case name", default="my_case")
+    template = Prompt.ask("Template", default="1D_minimal")
+
+    _run_mfc_command(["./mfc.sh", "new", name, "-t", template])
+
+
+def _interactive_validate():
+    """Interactive case validation."""
+    cons.print("[bold]Validate a Case File[/bold]")
+    cons.print()
+    
path = Prompt.ask("Path to case.py") + + _run_mfc_command(["./mfc.sh", "validate", path]) + + +def _interactive_build(): + """Interactive build.""" + cons.print("[bold]Build MFC[/bold]") + cons.print() + + jobs = Prompt.ask("Number of parallel jobs", default="4") + gpu = Prompt.ask("Enable GPU support?", choices=["y", "n"], default="n") + + args = ["./mfc.sh", "build", "-j", jobs] + if gpu == "y": + args.append("--gpu") + + _run_mfc_command(args) + + +def _interactive_run(): + """Interactive run.""" + cons.print("[bold]Run a Simulation[/bold]") + cons.print() + + path = Prompt.ask("Path to case.py") + ranks = Prompt.ask("Number of MPI ranks", default="1") + + _run_mfc_command(["./mfc.sh", "run", path, "-n", ranks]) + + +def _interactive_test(): + """Interactive test.""" + cons.print("[bold]Run Tests[/bold]") + cons.print() + + jobs = Prompt.ask("Number of parallel jobs", default="4") + + _run_mfc_command(["./mfc.sh", "test", "-j", jobs]) + + +def _interactive_clean(): + """Interactive clean.""" + cons.print("[bold]Clean Build Files[/bold]") + cons.print() + + confirm = Prompt.ask("Are you sure you want to clean all build files?", choices=["y", "n"], default="n") + + if confirm == "y": + _run_mfc_command(["./mfc.sh", "clean"]) + else: + cons.print("[dim]Cancelled[/dim]") diff --git a/toolchain/mfc/validate.py b/toolchain/mfc/validate.py new file mode 100644 index 0000000000..71216e7ef5 --- /dev/null +++ b/toolchain/mfc/validate.py @@ -0,0 +1,58 @@ +""" +MFC Validate Command - Validate a case file without building or running. +""" + +import os + +from .state import ARG +from .printer import cons +from .run import input as run_input +from .case_validator import CaseValidator, CaseConstraintError +from .common import MFCException + + +def validate(): + """Validate a case file without building or running.""" + input_file = ARG("input") + + if not os.path.isfile(input_file): + cons.print(f"[bold red]Error:[/bold red] File not found: {input_file}") + exit(1) + + cons.print(f"Validating [bold magenta]{input_file}[/bold magenta]...\n") + + try: + # Step 1: Load and parse case file (checks syntax) + case = run_input.load(input_file, do_print=False) + cons.print("[bold green]✓[/bold green] Syntax valid - case file parsed successfully") + cons.print(f" [dim]Loaded {len(case.params)} parameters[/dim]") + + # Step 2: Run constraint validation for each stage + stages = ['pre_process', 'simulation', 'post_process'] + all_passed = True + + for stage in stages: + try: + validator = CaseValidator(case.params) + validator.validate(stage) + cons.print(f"[bold green]✓[/bold green] {stage} constraints passed") + except CaseConstraintError as e: + all_passed = False + cons.print(f"[bold yellow]![/bold yellow] {stage} constraints: issues found") + # Show the constraint violations indented + for line in str(e).split('\n'): + if line.strip(): + cons.print(f" [dim]{line}[/dim]") + + # Step 3: Show summary + cons.print() + if all_passed: + cons.print("[bold green]Case validation complete - all checks passed![/bold green]") + else: + cons.print("[bold yellow]Case validation complete with warnings.[/bold yellow]") + cons.print("[dim]Note: Some constraint violations may be OK if you're not using that stage.[/dim]") + + except MFCException as e: + cons.print(f"\n[bold red]✗ Validation failed:[/bold red]") + cons.print(f"{e}") + exit(1) diff --git a/toolchain/pyproject.toml b/toolchain/pyproject.toml index 0155b5ca67..53e2140290 100644 --- a/toolchain/pyproject.toml +++ b/toolchain/pyproject.toml @@ -14,6 +14,7 @@ 
dependencies = [ "argparse", "dataclasses", "fastjsonschema", + "rapidfuzz", # For "did you mean?" typo suggestions # Build System "fypp", @@ -25,7 +26,7 @@ dependencies = [ "typos", "pylint", "fprettify", - "black", + "autopep8", # Python formatter (black has issues with Python 3.12.5) "ansi2txt", # Profiling
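To illustrate the new YAML case-file support added in toolchain/mfc/run/input.py above, here is a minimal standalone sketch of that load path; the case contents are illustrative, and the YAML-to-JSON round-trip mirrors the loader's own strategy:

```python
import json
import yaml

# A hypothetical minimal case in the newly supported YAML format.
yaml_case = """\
m: 100
n: 0
p: 0
dt: 1.0e-6
parallel_io: "T"
"""

# Mirror of the new branch in input.py: parse the YAML, then serialize it to
# JSON so the existing json.loads + schema-validation path serves both formats.
dictionary = yaml.safe_load(yaml_case)
json_str = json.dumps(dictionary)
assert json.loads(json_str)["m"] == 100
```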