@@ -26,10 +26,10 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k).
2626
2727This is done using a derivative neural network approximation.
2828
29- ``` @example
29+ ```@example nonlinear_elliptic
3030using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots
3131using Plots
32- import ModelingToolkit: Interval, infimum, supremum
32+ using ModelingToolkit: Interval, infimum, supremum
3333
3434@parameters x, y
3535Dx = Differential(x)
@@ -79,18 +79,14 @@ input_ = length(domains)
7979n = 15
8080chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:6] # 1:number of @variables
8181
82- strategy = QuadratureTraining( )
82+ strategy = GridTraining(0.01)
8383discretization = PhysicsInformedNN(chain, strategy)
8484
8585vars = [u(x, y), w(x, y), Dxu(x, y), Dyu(x, y), Dxw(x, y), Dyw(x, y)]
8686@named pdesystem = PDESystem(eqs_, bcs__, domains, [x, y], vars)
8787prob = NeuralPDE.discretize(pdesystem, discretization)
8888sym_prob = NeuralPDE.symbolic_discretize(pdesystem, discretization)
8989
90- strategy = NeuralPDE.QuadratureTraining()
91- discretization = PhysicsInformedNN(chain, strategy)
92- sym_prob = NeuralPDE.symbolic_discretize(pdesystem, discretization)
93-
9490pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
9591bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions[1:6]
9692aprox_derivative_loss_functions = sym_prob.loss_functions.bc_loss_functions[7:end]
@@ -99,15 +95,15 @@ global iteration = 0
9995callback = function (p, l)
10096 if iteration % 10 == 0
10197 println("loss: ", l)
102- println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
103- println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
104- println("der_losses: ", map(l_ -> l_(p), aprox_derivative_loss_functions))
98+ println("pde_losses: ", map(l_ -> l_(p.u), pde_inner_loss_functions))
99+ println("bcs_losses: ", map(l_ -> l_(p.u), bcs_inner_loss_functions))
100+ println("der_losses: ", map(l_ -> l_(p.u), aprox_derivative_loss_functions))
105101 end
106102 global iteration += 1
107103 return false
108104end
109105
110- res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 5000 )
106+ res = Optimization.solve(prob, BFGS(); maxiters = 100)
111107
112108phi = discretization.phi
113109
@@ -120,14 +116,19 @@ analytic_sol_func(x, y) = [u_analytic(x, y), w_analytic(x, y)]
120116u_real = [[analytic_sol_func(x, y)[i] for x in xs for y in ys] for i in 1:2]
121117u_predict = [[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2]
122118diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
119+ ps = []
123120for i in 1:2
124121 p1 = plot(xs, ys, u_real[i], linetype = :contourf, title = "u$i, analytic")
125122 p2 = plot(xs, ys, u_predict[i], linetype = :contourf, title = "predict")
126123 p3 = plot(xs, ys, diff_u[i], linetype = :contourf, title = "error")
127- plot(p1, p2, p3)
128- savefig("non_linear_elliptic_sol_u$i")
124+ push!(ps, plot(p1, p2, p3))
129125end
130126```
131127
132- ![non_linear_elliptic_sol_u1](https://user-images.githubusercontent.com/26853713/125745550-0b667c10-b09a-4659-a543-4f7a7e025d6c.png)
133- ![non_linear_elliptic_sol_u2](https://user-images.githubusercontent.com/26853713/125745571-45a04739-7838-40ce-b979-43b88d149028.png)
128+ ```@example nonlinear_elliptic
129+ ps[1]
130+ ```
131+
132+ ```@example nonlinear_elliptic
133+ ps[2]
134+ ```