Merge pull request #149 from YichengDWu/scalar
Rename `Scalar` to `ConstantFunction`
YichengDWu authored Oct 2, 2022
2 parents f7a0f39 + 9d5a8ca commit 4fac3ab
Showing 7 changed files with 46 additions and 50 deletions.
31 changes: 12 additions & 19 deletions docs/src/tutorials/sod.jl
@@ -14,33 +14,26 @@ u₀(x) = 0.0
p₀(x) = ifelse(x < 0.5, 1.0, 0.1)
@register p₀(x)

bcs = [
ρ(0, x) ~ ρ₀(x),
u(0, x) ~ u₀(x),
p(0, x) ~ p₀(x),
u(t, 0) ~ 0.0,
u(t, 1) ~ 0.0
]
bcs = [ρ(0, x) ~ ρ₀(x), u(0, x) ~ u₀(x), p(0, x) ~ p₀(x), u(t, 0) ~ 0.0, u(t, 1) ~ 0.0]

γ = 1.4
E(t, x) = p(t, x) / (γ - 1) + 0.5 * ρ(t, x) * u(t, x) * u(t, x)

eqs = [
Dₜ(ρ(t, x)) + Dₓ(ρ(t, x) * u(t, x)) ~ 0.0,
Dₜ(ρ(t, x) * u(t, x)) + Dₓ(ρ(t, x) * u(t, x) * u(t, x) + p(t, x)) ~ 0.0,
Dₜ(E(t,x)) + Dₓ(u(t, x) * (E(t,x) + p(t, x))) ~ 0.0
Dₜ(E(t, x)) + Dₓ(u(t, x) * (E(t, x) + p(t, x))) ~ 0.0,
]

t_min, t_max = 0.0, 0.2
x_min, x_max = 0.0, 1.0
domains = [t ∈ Interval(t_min, t_max),
           x ∈ Interval(x_min, x_max)]
domains = [t ∈ Interval(t_min, t_max), x ∈ Interval(x_min, x_max)]

@named pde_system = PDESystem(eqs, bcs, domains, [t, x], [u(t, x), ρ(t, x), p(t, x)])

pinn = PINN(u = FullyConnected(2, 1, tanh; num_layers = 4, hidden_dims = 16),
ρ = FullyConnected(2, 1, tanh; num_layers = 4, hidden_dims = 16),
p = FullyConnected(2, 1, tanh; num_layers = 4, hidden_dims = 16))
pinn = PINN(; u=FullyConnected(2, 1, tanh; num_layers=4, hidden_dims=16),
ρ=FullyConnected(2, 1, tanh; num_layers=4, hidden_dims=16),
p=FullyConnected(2, 1, tanh; num_layers=4, hidden_dims=16))

sampler = QuasiRandomSampler(1000, 100)

@@ -59,19 +52,19 @@ callback = function (p, l)
return false
end

res = Optimization.solve(prob, LBFGS(); maxiters=2000, callback = callback)
res = Optimization.solve(prob, LBFGS(); maxiters=2000, callback=callback)

for _ in 1:10
data = Sophon.sample(pde_system, sampler, strategy)
prob = remake(prob; u0=res.u, p = data)
res = Optimization.solve(prob, LBFGS(); maxiters=2000, callback = callback)
prob = remake(prob; u0=res.u, p=data)
res = Optimization.solve(prob, LBFGS(); maxiters=2000, callback=callback)
end

θ = res.u
phi = pinn.phi
xs = x_min:0.01:x_max |> collect

phi = pinn.phi
p1 = plot(xs, [first(phi.u([t_max, x], θ.u)) for x in xs], label ="u(t=1,x)")
p2 = plot!(xs, [first(phi.ρ([t_max, x], θ.ρ)) for x in xs], label ="ρ(t=1,x)")
p3 = plot!(xs, [first(phi.p([t_max, x], θ.p)) for x in xs], label ="p(t=1,x)")
p1 = plot(xs, [first(phi.u([t_max, x], θ.u)) for x in xs]; label="u(t=1,x)")
p2 = plot!(xs, [first(phi.ρ([t_max, x], θ.ρ)) for x in xs]; label="ρ(t=1,x)")
p3 = plot!(xs, [first(phi.p([t_max, x], θ.p)) for x in xs]; label="p(t=1,x)")
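
For reference, the `eqs` block in the tutorial above encodes the 1D compressible Euler equations in conservative form, with the total energy given by the ideal-gas closure used in the code (γ = 1.4):

```latex
% Conservative form of the 1D Euler equations discretized in sod.jl above.
\begin{aligned}
\partial_t \rho + \partial_x (\rho u) &= 0, \\
\partial_t (\rho u) + \partial_x \left(\rho u^2 + p\right) &= 0, \\
\partial_t E + \partial_x \left(u\,(E + p)\right) &= 0, \qquad
E = \frac{p}{\gamma - 1} + \frac{1}{2}\,\rho u^2 .
\end{aligned}
```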
4 changes: 2 additions & 2 deletions src/Sophon.jl
@@ -43,7 +43,7 @@ export FourierFeature, TriplewiseFusion, FullyConnected, Sine, RBF, DiscreteFour
Scalar, ScalarLayer, SplitFunction
export PINNAttention, FourierNet, FourierAttention, Siren, FourierFilterNet, BACON
export DeepONet
export PINN, symbolic_discretize, discretize, QuasiRandomSampler,
NonAdaptiveTraining, AdaptiveTraining
export PINN, symbolic_discretize, discretize, QuasiRandomSampler, NonAdaptiveTraining,
AdaptiveTraining

end
2 changes: 1 addition & 1 deletion src/compact/NeuralPDE/pinnsampler.jl
@@ -13,7 +13,7 @@ function sample end
sampling_alg=SobolSample())
Sampler to generate the datasets for PDE and boundary conditions using a quasi-random sampling algorithm.
You can call `sample(pde, sampler, strategy)` on it to generate all the datasets. See [QuasiMonteCarlo.jl](https://github.com/SciML/QuasiMonteCarlo.jl)
You can call `sample(pde, sampler, strategy)` on it to generate all the datasets. See [QuasiMonteCarlo.jl](https://github.com/SciML/QuasiMonteCarlo.jl)
for available sampling algorithms.
"""
struct QuasiRandomSampler{T, P, B, S} <: PINNSampler{T}
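
A minimal usage sketch of the sampler described above; it assumes `pde_system` and `strategy` are already defined, as in the Sod tutorial earlier in this commit:

```julia
# Sketch only: `pde_system` and `strategy` come from a setup like docs/src/tutorials/sod.jl.
using Sophon

# 1000 quasi-random points for the PDE residuals, 100 per boundary condition.
sampler = QuasiRandomSampler(1000, 100)

# Generate all datasets for the PDE and boundary conditions in one call.
data = Sophon.sample(pde_system, sampler, strategy)
```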
25 changes: 16 additions & 9 deletions src/compact/NeuralPDE/training_strategies.jl
@@ -13,7 +13,7 @@ struct NonAdaptiveTraining{P, B} <: AbstractTrainingAlg
end
end

function scalarize(strategy::NonAdaptiveTraining{P, B}, phi, datafree_pde_loss_function,
function scalarize(strategy::NonAdaptiveTraining{P, B}, phi, datafree_pde_loss_function,
datafree_bc_loss_function) where {P, B}
(; pde_weights, bcs_weights) = strategy

@@ -45,29 +45,34 @@ Adaptive weights for the loss functions. Here `pde_weights` and `bcs_weights` are
functions that take in `(phi, x, θ)` and return the point-wise weights. Note that `bcs_weights` can be
real numbers but they will be converted to functions that return the same numbers.
"""
struct AdaptiveTraining{P,B} <: AbstractTrainingAlg
struct AdaptiveTraining{P, B} <: AbstractTrainingAlg
pde_weights::P
bcs_weights::B
end

function AdaptiveTraining(pde_weights::Function, bcs_weights::Real)
_bcs_weights = (phi, cord, θ) -> bcs_weights
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights, _bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights,
_bcs_weights)
end

function AdaptiveTraining(pde_weights::Function, bcs_weights::NTuple{N, <:Real}) where {N}
_bcs_weights = map(w -> (phi, cord, θ) -> w, bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights, _bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights,
_bcs_weights)
end

function AdaptiveTraining(pde_weights::Tuple{Vararg{<:Function}}, bcs_weights::Int)
_bcs_weights = (phi, cord, θ) -> bcs_weights
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights, _bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights,
_bcs_weights)
end

function AdaptiveTraining(pde_weights::Tuple{Vararg{<:Function}}, bcs_weights::NTuple{N, <:Real}) where {N}
function AdaptiveTraining(pde_weights::Tuple{Vararg{<:Function}},
bcs_weights::NTuple{N, <:Real}) where {N}
_bcs_weights = map(w -> (phi, cord, θ) -> w, bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights, _bcs_weights)
return AdaptiveTraining{typeof(pde_weights), typeof(_bcs_weights)}(pde_weights,
_bcs_weights)
end

function scalarize(strategy::AdaptiveTraining, phi, datafree_pde_loss_function,
@@ -88,9 +93,11 @@ end

function scalarize(phi, weights::Tuple{Vararg{<:Function}}, datafree_loss_function::Tuple)
N = length(datafree_loss_function)
ex = :(mean($(weights[1])($phi, p[1], θ) .* abs2.($(datafree_loss_function[1])(p[1], θ))))
ex = :(mean($(weights[1])($phi, p[1], θ) .*
abs2.($(datafree_loss_function[1])(p[1], θ))))
for i in 2:N
ex = :(mean($(weights[i])($phi, p[$i], θ) .* abs2.($(datafree_loss_function[i])(p[$i], θ))) + $ex)
ex = :(mean($(weights[i])($phi, p[$i], θ) .*
abs2.($(datafree_loss_function[i])(p[$i], θ))) + $ex)
end
loss_f = :((θ, p) -> $ex)
return NeuralPDE.@RuntimeGeneratedFunction(loss_f)
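
As a hedged sketch of the constructors above: `pde_weights` is a point-wise weight function of `(phi, x, θ)`, and a plain number passed as `bcs_weights` is wrapped into a constant function internally; the weighting rule below is purely illustrative:

```julia
# Sketch only: the weight function signature follows the AdaptiveTraining docstring above;
# the weighting rule and the coordinate layout (row 2 = spatial coordinate) are assumptions.
using Sophon

# Up-weight PDE residuals near x = 0.5, where the Sod solution develops sharp features.
pde_weight = (phi, x, θ) -> 1 .+ 10 .* exp.(-100 .* (x[2:2, :] .- 0.5) .^ 2)

# The scalar 10 becomes (phi, x, θ) -> 10 for every boundary condition.
strategy = AdaptiveTraining(pde_weight, 10)
```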
8 changes: 2 additions & 6 deletions src/compact/NeuralPDE/utils.jl
@@ -471,14 +471,10 @@ function numeric_derivative(phi, x, θ, dim::Int, order::Int)
ε = adapt(_type, ε)

if order == 4
return (phi(x .+ 2 .* ε, θ) .- 4 .* phi(x .+ ε, θ)
.+
6 .* phi(x, θ)
.-
return (phi(x .+ 2 .* ε, θ) .- 4 .* phi(x .+ ε, θ) .+ 6 .* phi(x, θ) .-
4 .* phi(x .- ε, θ) .+ phi(x .- 2 .* ε, θ)) .* _epsilon^4
elseif order == 3
return (phi(x .+ 2 .* ε, θ) .- 2 .* phi(x .+ ε, θ, phi) .+ 2 .* phi(x .- ε, θ)
-
return (phi(x .+ 2 .* ε, θ) .- 2 .* phi(x .+ ε, θ, phi) .+ 2 .* phi(x .- ε, θ) -
phi(x .- 2 .* ε, θ)) .* _epsilon^3 ./ 2
elseif order == 2
return (phi(x .+ ε, θ) .+ phi(x .- ε, θ) .- 2 .* phi(x, θ)) .* _epsilon^2
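
The reformatted branches above are the standard central-difference stencils for the fourth, third, and second derivatives (written here assuming `_epsilon` is the reciprocal of the step `ε`):

```latex
% Central-difference stencils corresponding to the order == 4, 3, 2 branches above.
\begin{aligned}
f^{(4)}(x) &\approx \frac{f(x+2\varepsilon) - 4 f(x+\varepsilon) + 6 f(x) - 4 f(x-\varepsilon) + f(x-2\varepsilon)}{\varepsilon^{4}}, \\
f^{(3)}(x) &\approx \frac{f(x+2\varepsilon) - 2 f(x+\varepsilon) + 2 f(x-\varepsilon) - f(x-2\varepsilon)}{2\,\varepsilon^{3}}, \\
f^{(2)}(x) &\approx \frac{f(x+\varepsilon) - 2 f(x) + f(x-\varepsilon)}{\varepsilon^{2}} .
\end{aligned}
```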
18 changes: 9 additions & 9 deletions src/layers/basic.jl
@@ -20,7 +20,7 @@ Fourier Feature Network.
\phi^{(i)}(x)=\left[\sin \left(2 \pi f_i x\right) ; \cos \left(2 \pi f_i x\right)\right]
```
# Parameters
If `std` is used, then parameters are `W`s in the formula.
# Inputs
@@ -280,27 +280,27 @@ end

ScalarLayer(connection::Function) = ScalarLayer{typeof(connection)}(connection)

initialparameters(rng::AbstractRNG, s::ScalarLayer) = (; scalar=0.0f0)
initialparameters(rng::AbstractRNG, s::ScalarLayer) = (; scalar=[0.0f0;;])
parameterlength(s::ScalarLayer) = 1

@inline function (s::ScalarLayer)(x::AbstractArray, ps, st::NamedTuple)
return s.connection(ps.scalar, x), st
end

"""
Scalar()
ConstantFunction()
A container for a scalar parameter. This is useful when you want a dummy layer
that returns the scalar parameter for any input.
"""
struct Scalar <: AbstractExplicitLayer end
struct ConstantFunction <: AbstractExplicitLayer end

initialparameters(rng::AbstractRNG, s::Scalar) = (; scalar=0.0f0)
parameterlength(s::Scalar) = 1
statelength(s::Scalar) = 0
initialparameters(rng::AbstractRNG, s::ConstantFunction) = (; constant=[0.0f0;;])
parameterlength(s::ConstantFunction) = 1
statelength(s::ConstantFunction) = 0

@inline function (s::Scalar)(x::AbstractArray, ps, st::NamedTuple)
return ps.scalar
@inline function (s::ConstantFunction)(x::AbstractArray, ps, st::NamedTuple)
return ps.constant, st
end

"""
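
A minimal usage sketch of the renamed layer, assuming the usual Lux.jl `setup` pattern (the input shape below is illustrative):

```julia
# Sketch only: exercises the renamed ConstantFunction layer via the Lux.jl setup pattern.
using Lux, Random, Sophon

rng = Random.default_rng()
layer = Sophon.ConstantFunction()

# Parameters hold a single trainable constant, initialized to the 1×1 array [0.0f0;;].
ps, st = Lux.setup(rng, layer)

# The layer ignores its input and returns the constant for any batch of coordinates.
x = rand(Float32, 2, 5)
y, _ = layer(x, ps, st)   # y == ps.constant
```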
8 changes: 4 additions & 4 deletions test/runtests.jl
@@ -234,9 +234,9 @@ rng = Random.default_rng()
end

@testset "TrainingStrategy" begin
@test_nowarn AdaptiveTraining((θ,p)->p, 2)
@test_nowarn AdaptiveTraining((θ,p)->p,(3,4,5))
@test_nowarn AdaptiveTraining((θ,p)->p, 5)
@test_nowarn AdaptiveTraining(((θ,p)->p, (θ,p)->θ),(3,4,5))
@test_nowarn AdaptiveTraining((θ, p) -> p, 2)
@test_nowarn AdaptiveTraining((θ, p) -> p, (3, 4, 5))
@test_nowarn AdaptiveTraining((θ, p) -> p, 5)
@test_nowarn AdaptiveTraining(((θ, p) -> p, (θ, p) -> θ), (3, 4, 5))
end
end end
