diff --git a/docs/zh/examples/aneurysm.md b/docs/zh/examples/aneurysm.md
index ac3cc2e7e..ed73444ac 100644
--- a/docs/zh/examples/aneurysm.md
+++ b/docs/zh/examples/aneurysm.md
@@ -2,6 +2,34 @@
+=== "模型训练命令"
+
+ ``` sh
+ # linux
+ wget https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar
+ # windows
+ # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar --output aneurysm_dataset.tar
+ # unzip it
+ tar -xvf aneurysm_dataset.tar
+ python aneurysm.py
+ ```
+
+=== "模型评估命令"
+
+ ``` sh
+ # linux
+ wget https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar
+ # windows
+ # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar --output aneurysm_dataset.tar
+ # unzip it
+ tar -xvf aneurysm_dataset.tar
+ python aneurysm.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/aneurysm_pretrained.pdparams
+ ```
+
+| 预训练模型 | 指标 |
+|:--| :--|
+| [aneurysm_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/aneurysm_pretrained.pdparams) | loss(ref_u_v_w_p): 0.01488 <br> MSE.p(ref_u_v_w_p): 0.01412 <br> MSE.u(ref_u_v_w_p): 0.00021 <br> MSE.v(ref_u_v_w_p): 0.00024 <br> MSE.w(ref_u_v_w_p): 0.00032 |
+
## 1. 背景简介
深度学习方法可以用于处理血管瘤问题,其中包括基于物理信息的深度学习方法。这种方法可以用于脑血管瘤的压力建模,以预测和评估血管瘤破裂的风险。
@@ -32,9 +60,9 @@ $$
上式中 $f$ 即为 MLP 模型本身,用 PaddleScience 代码表示如下
-``` py linenums="26"
+``` py linenums="24"
--8<--
-examples/aneurysm/aneurysm.py:26:29
+examples/aneurysm/aneurysm.py:24:25
--8<--
```
@@ -46,9 +74,9 @@ examples/aneurysm/aneurysm.py:26:29
血管瘤模型涉及到 2 个方程,一是流体 N-S 方程,二是流量计算方程,因此使用 PaddleScience 内置的 `NavierStokes` 和 `NormalDotVec` 即可。
-``` py linenums="31"
+``` py linenums="27"
--8<--
-examples/aneurysm/aneurysm.py:31:37
+examples/aneurysm/aneurysm.py:27:33
--8<--
```
@@ -77,17 +105,17 @@ tar -xvf aneurysm_dataset.tar
然后通过 PaddleScience 内置的 STL 几何类 `Mesh` 来读取、解析这些几何文件,并且通过布尔运算,组合出各个计算域,代码如下:
-``` py linenums="39"
+``` py linenums="35"
--8<--
-examples/aneurysm/aneurysm.py:39:44
+examples/aneurysm/aneurysm.py:35:40
--8<--
```
在此之后可以对几何域进行缩放和平移,以缩放输入数据的坐标范围,促进模型训练收敛。
-``` py linenums="46"
+``` py linenums="42"
--8<--
-examples/aneurysm/aneurysm.py:46:62
+examples/aneurysm/aneurysm.py:42:54
--8<--
```
@@ -95,9 +123,9 @@ examples/aneurysm/aneurysm.py:46:62
本案例共涉及到 6 个约束,在具体约束构建之前,可以先构建数据读取配置,以便后续构建多个约束时复用该配置。
-``` py linenums="64"
+``` py linenums="56"
--8<--
-examples/aneurysm/aneurysm.py:64:75
+examples/aneurysm/aneurysm.py:56:66
--8<--
```
@@ -105,9 +133,9 @@ examples/aneurysm/aneurysm.py:64:75
以作用在内部点上的 `InteriorConstraint` 为例,代码如下:
-``` py linenums="125"
+``` py linenums="113"
--8<--
-examples/aneurysm/aneurysm.py:125:132
+examples/aneurysm/aneurysm.py:113:120
--8<--
```
@@ -128,17 +156,17 @@ examples/aneurysm/aneurysm.py:125:132
接着需要对**血管入口、出口、血管壁**这三个表面施加约束,包括入口速度约束、出口压力约束、血管壁无滑移约束。
在 `bc_inlet` 约束中,入口处的流速满足从中心点开始向周围呈二次抛物线衰减,此处使用抛物线函数表示速度随着远离圆心而衰减,再将其作为 `BoundaryConstraint` 的第二个参数(字典)的 value。
-``` py linenums="77"
+``` py linenums="68"
--8<--
-examples/aneurysm/aneurysm.py:77:108
+examples/aneurysm/aneurysm.py:68:96
--8<--
```
血管出口、血管壁约束的构建方法类似,如下所示:
-``` py linenums="109"
+``` py linenums="97"
--8<--
-examples/aneurysm/aneurysm.py:109:124
+examples/aneurysm/aneurysm.py:97:112
--8<--
```
@@ -146,9 +174,9 @@ examples/aneurysm/aneurysm.py:109:124
对于血管入口下方的一段区域和出口区域(面),需额外施加流入和流出的流量约束,由于流量计算涉及到具体面积,因此需要使用离散积分的方式进行计算,这些过程已经内置在了 `IntegralConstraint` 这一约束条件中。如下所示:
-``` py linenums="133"
+``` py linenums="121"
--8<--
-examples/aneurysm/aneurysm.py:133:160
+examples/aneurysm/aneurysm.py:121:148
--8<--
```
@@ -164,9 +192,9 @@ $$
在微分方程约束、边界约束、初值约束构建完毕之后,以刚才的命名为关键字,封装到一个字典中,方便后续访问。
-``` py linenums="161"
+``` py linenums="149"
--8<--
-examples/aneurysm/aneurysm.py:161:169
+examples/aneurysm/aneurysm.py:149:157
--8<--
```
@@ -174,9 +202,9 @@ examples/aneurysm/aneurysm.py:161:169
接下来需要指定训练轮数和学习率,此处按实验经验,使用 1500 轮训练轮数。
-``` py linenums="171"
+``` py linenums="59"
--8<--
-examples/aneurysm/aneurysm.py:171:172
+examples/aneurysm/conf/aneurysm.yaml:59:75
--8<--
```
@@ -184,9 +212,9 @@ examples/aneurysm/aneurysm.py:171:172
训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器,并配合使用机器学习中常用的 OneCycle 学习率调整策略。
-``` py linenums="174"
+``` py linenums="159"
--8<--
-examples/aneurysm/aneurysm.py:174:183
+examples/aneurysm/aneurysm.py:159:163
--8<--
```
@@ -194,9 +222,9 @@ examples/aneurysm/aneurysm.py:174:183
在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.GeometryValidator` 构建评估器。
-``` py linenums="185"
+``` py linenums="165"
--8<--
-examples/aneurysm/aneurysm.py:185:234
+examples/aneurysm/aneurysm.py:165:241
--8<--
```
@@ -206,9 +234,9 @@ examples/aneurysm/aneurysm.py:185:234
本文中的输出数据是一个区域内的三维点集,因此只需要将评估的输出数据保存成 **vtu格式** 文件,最后用可视化软件打开查看即可。代码如下:
-``` py linenums="236"
+``` py linenums="216"
--8<--
-examples/aneurysm/aneurysm.py:236:250
+examples/aneurysm/aneurysm.py:216:229
--8<--
```
@@ -216,9 +244,9 @@ examples/aneurysm/aneurysm.py:236:250
完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。
-``` py linenums="251"
+``` py linenums="231"
--8<--
-examples/aneurysm/aneurysm.py:251:278
+examples/aneurysm/aneurysm.py:231:258
--8<--
```
diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py
index 2d34ae0dc..93e11cef2 100644
--- a/examples/aneurysm/aneurysm.py
+++ b/examples/aneurysm/aneurysm.py
@@ -4,55 +4,47 @@
pretrained model download link: https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/aneurysm_pretrained.pdparams
"""
+from os import path as osp
+
+import hydra
import numpy as np
+from omegaconf import DictConfig
import ppsci
-from ppsci.utils import config
from ppsci.utils import logger
from ppsci.utils import reader
-if __name__ == "__main__":
- args = config.parse_args()
+
+def train(cfg: DictConfig):
# set random seed for reproducibility
- SEED = 2023
- ppsci.utils.misc.set_random_seed(SEED)
- # set output directory
- OUTPUT_DIR = (
- f"./output_aneurysm_seed{SEED}" if not args.output_dir else args.output_dir
- )
+ ppsci.utils.misc.set_random_seed(cfg.seed)
# initialize logger
- logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info")
+ logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
# set model
- model = ppsci.arch.MLP(
- ("x", "y", "z"), ("u", "v", "w", "p"), 6, 512, "silu", weight_norm=True
- )
+ model = ppsci.arch.MLP(**cfg.MODEL)
# set equation
- NU = 0.025
- SCALE = 0.4
equation = {
- "NavierStokes": ppsci.equation.NavierStokes(NU * SCALE, 1.0, 3, False),
+ "NavierStokes": ppsci.equation.NavierStokes(
+ cfg.NU * cfg.SCALE, cfg.RHO, cfg.DIM, False
+ ),
"NormalDotVec": ppsci.equation.NormalDotVec(("u", "v", "w")),
}
# set geometry
- inlet_geo = ppsci.geometry.Mesh("./stl/aneurysm_inlet.stl")
- outlet_geo = ppsci.geometry.Mesh("./stl/aneurysm_outlet.stl")
- noslip_geo = ppsci.geometry.Mesh("./stl/aneurysm_noslip.stl")
- integral_geo = ppsci.geometry.Mesh("./stl/aneurysm_integral.stl")
- interior_geo = ppsci.geometry.Mesh("./stl/aneurysm_closed.stl")
-
- # inlet velocity profile
- CENTER = (-18.40381048596882, -50.285383353981196, 12.848136936899031)
- SCALE = 0.4
+ inlet_geo = ppsci.geometry.Mesh(cfg.INLET_STL_PATH)
+ outlet_geo = ppsci.geometry.Mesh(cfg.OUTLET_STL_PATH)
+ noslip_geo = ppsci.geometry.Mesh(cfg.NOSLIP_STL_PATH)
+ integral_geo = ppsci.geometry.Mesh(cfg.INTEGRAL_STL_PATH)
+ interior_geo = ppsci.geometry.Mesh(cfg.INTERIOR_STL_PATH)
# normalize meshes
- inlet_geo = inlet_geo.translate(-np.array(CENTER)).scale(SCALE)
- outlet_geo = outlet_geo.translate(-np.array(CENTER)).scale(SCALE)
- noslip_geo = noslip_geo.translate(-np.array(CENTER)).scale(SCALE)
- integral_geo = integral_geo.translate(-np.array(CENTER)).scale(SCALE)
- interior_geo = interior_geo.translate(-np.array(CENTER)).scale(SCALE)
+ inlet_geo = inlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE)
+ outlet_geo = outlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE)
+ noslip_geo = noslip_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE)
+ integral_geo = integral_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE)
+ interior_geo = interior_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE)
geom = {
"inlet_geo": inlet_geo,
"outlet_geo": outlet_geo,
@@ -62,10 +54,9 @@
}
# set dataloader config
- ITERS_PER_EPOCH = 1000
train_dataloader_cfg = {
"dataset": "NamedArrayDataset",
- "iters_per_epoch": ITERS_PER_EPOCH,
+ "iters_per_epoch": cfg.TRAIN.iters_per_epoch,
"sampler": {
"name": "BatchSampler",
"drop_last": True,
@@ -75,34 +66,31 @@
}
# set constraint
- INLET_NORMAL = (0.8526, -0.428, 0.299)
- INLET_AREA = 21.1284 * (SCALE**2)
- INLET_CENTER = (-4.24298030045776, 4.082857101816247, -4.637790193399717)
+ INLET_AREA = 21.1284 * (cfg.SCALE**2)
INLET_RADIUS = np.sqrt(INLET_AREA / np.pi)
- INLET_VEL = 1.5
def _compute_parabola(_in):
- centered_x = _in["x"] - INLET_CENTER[0]
- centered_y = _in["y"] - INLET_CENTER[1]
- centered_z = _in["z"] - INLET_CENTER[2]
+ centered_x = _in["x"] - cfg.INLET_CENTER[0]
+ centered_y = _in["y"] - cfg.INLET_CENTER[1]
+ centered_z = _in["z"] - cfg.INLET_CENTER[2]
distance = np.sqrt(centered_x**2 + centered_y**2 + centered_z**2)
- parabola = INLET_VEL * np.maximum((1 - (distance / INLET_RADIUS) ** 2), 0)
+ parabola = cfg.INLET_VEL * np.maximum((1 - (distance / INLET_RADIUS) ** 2), 0)
return parabola
def inlet_u_ref_func(_in):
- return INLET_NORMAL[0] * _compute_parabola(_in)
+ return cfg.INLET_NORMAL[0] * _compute_parabola(_in)
def inlet_v_ref_func(_in):
- return INLET_NORMAL[1] * _compute_parabola(_in)
+ return cfg.INLET_NORMAL[1] * _compute_parabola(_in)
def inlet_w_ref_func(_in):
- return INLET_NORMAL[2] * _compute_parabola(_in)
+ return cfg.INLET_NORMAL[2] * _compute_parabola(_in)
bc_inlet = ppsci.constraint.BoundaryConstraint(
{"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]},
{"u": inlet_u_ref_func, "v": inlet_v_ref_func, "w": inlet_w_ref_func},
geom["inlet_geo"],
- {**train_dataloader_cfg, "batch_size": 1100},
+ {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_inlet},
ppsci.loss.MSELoss("sum"),
name="inlet",
)
@@ -110,7 +98,7 @@ def inlet_w_ref_func(_in):
{"p": lambda d: d["p"]},
{"p": 0},
geom["outlet_geo"],
- {**train_dataloader_cfg, "batch_size": 650},
+ {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_outlet},
ppsci.loss.MSELoss("sum"),
name="outlet",
)
@@ -118,15 +106,15 @@ def inlet_w_ref_func(_in):
{"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]},
{"u": 0, "v": 0, "w": 0},
geom["noslip_geo"],
- {**train_dataloader_cfg, "batch_size": 5200},
+ {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_noslip},
ppsci.loss.MSELoss("sum"),
name="no_slip",
)
- pde_constraint = ppsci.constraint.InteriorConstraint(
+ pde = ppsci.constraint.InteriorConstraint(
equation["NavierStokes"].equations,
{"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
geom["interior_geo"],
- {**train_dataloader_cfg, "batch_size": 6000},
+ {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde},
ppsci.loss.MSELoss("sum"),
name="interior",
)
@@ -136,12 +124,12 @@ def inlet_w_ref_func(_in):
geom["outlet_geo"],
{
**train_dataloader_cfg,
- "iters_per_epoch": 100,
- "batch_size": 1,
- "integral_batch_size": 310,
+ "iters_per_epoch": cfg.TRAIN.iters_integral.igc_outlet,
+ "batch_size": cfg.TRAIN.batch_size.igc_outlet,
+ "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_outlet,
},
ppsci.loss.IntegralLoss("sum"),
- weight_dict={"normal_dot_vec": 0.1},
+ weight_dict=cfg.TRAIN.weight.igc_outlet,
name="igc_outlet",
)
igc_integral = ppsci.constraint.IntegralConstraint(
@@ -150,12 +138,12 @@ def inlet_w_ref_func(_in):
geom["integral_geo"],
{
**train_dataloader_cfg,
- "iters_per_epoch": 100,
- "batch_size": 1,
- "integral_batch_size": 310,
+ "iters_per_epoch": cfg.TRAIN.iters_integral.igc_integral,
+ "batch_size": cfg.TRAIN.batch_size.igc_integral,
+ "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_integral,
},
ppsci.loss.IntegralLoss("sum"),
- weight_dict={"normal_dot_vec": 0.1},
+ weight_dict=cfg.TRAIN.weight.igc_integral,
name="igc_integral",
)
# wrap constraints together
@@ -163,28 +151,20 @@ def inlet_w_ref_func(_in):
bc_inlet.name: bc_inlet,
bc_outlet.name: bc_outlet,
bc_noslip.name: bc_noslip,
- pde_constraint.name: pde_constraint,
+ pde.name: pde,
igc_outlet.name: igc_outlet,
igc_integral.name: igc_integral,
}
- # set training hyper-parameters
- EPOCHS = 1500 if not args.epochs else args.epochs
-
# set optimizer
lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay(
- EPOCHS,
- ITERS_PER_EPOCH,
- 0.001,
- 0.95,
- EPOCHS * ITERS_PER_EPOCH // 100,
- by_epoch=False,
+ **cfg.TRAIN.lr_scheduler
)()
optimizer = ppsci.optimizer.Adam(lr_scheduler)(model)
# set validator
eval_data_dict = reader.load_csv_file(
- "./data/aneurysm_parabolicInlet_sol0.csv",
+ cfg.EVAL_CSV_PATH,
("x", "y", "z", "u", "v", "w", "p"),
{
"x": "Points:0",
@@ -197,12 +177,12 @@ def inlet_w_ref_func(_in):
},
)
input_dict = {
- "x": (eval_data_dict["x"] - CENTER[0]) * SCALE,
- "y": (eval_data_dict["y"] - CENTER[1]) * SCALE,
- "z": (eval_data_dict["z"] - CENTER[2]) * SCALE,
+ "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE,
+ "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE,
+ "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE,
}
if "area" in input_dict.keys():
- input_dict["area"] *= SCALE ** (equation["NavierStokes"].dim)
+ input_dict["area"] *= cfg.SCALE ** (equation["NavierStokes"].dim)
label_dict = {
"p": eval_data_dict["p"],
@@ -220,7 +200,7 @@ def inlet_w_ref_func(_in):
"num_workers": 1,
}
sup_validator = ppsci.validate.SupervisedValidator(
- {**eval_dataloader_cfg, "batch_size": 4096},
+ {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
ppsci.loss.MSELoss("mean"),
{
"p": lambda out: out["p"],
@@ -235,7 +215,7 @@ def inlet_w_ref_func(_in):
# set visualizer(optional)
visualizer = {
- "visulzie_u_v_w_p": ppsci.visualize.VisualizerVtu(
+ "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu(
input_dict,
{
"p": lambda out: out["p"],
@@ -243,7 +223,7 @@ def inlet_w_ref_func(_in):
"v": lambda out: out["v"],
"w": lambda out: out["w"],
},
- batch_size=4096,
+ batch_size=cfg.EVAL.batch_size.sup_validator,
prefix="result_u_v_w_p",
),
}
@@ -252,43 +232,135 @@ def inlet_w_ref_func(_in):
solver = ppsci.solver.Solver(
model,
constraint,
- OUTPUT_DIR,
+ cfg.output_dir,
optimizer,
lr_scheduler,
- EPOCHS,
- ITERS_PER_EPOCH,
- save_freq=20,
+ cfg.TRAIN.epochs,
+ cfg.TRAIN.iters_per_epoch,
+ save_freq=cfg.TRAIN.save_freq,
+ log_freq=cfg.log_freq,
eval_during_train=True,
- log_freq=20,
- eval_freq=20,
- seed=SEED,
+ eval_freq=cfg.TRAIN.eval_freq,
+ seed=cfg.seed,
equation=equation,
geom=geom,
validator=validator,
visualizer=visualizer,
- eval_with_no_grad=True,
+ pretrained_model_path=cfg.TRAIN.pretrained_model_path,
+ checkpoint_path=cfg.TRAIN.checkpoint_path,
+ eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
)
# train model
solver.train()
-
# evaluate after finished training
solver.eval()
# visualize prediction after finished training
solver.visualize()
- # directly evaluate pretrained model(optional)
- logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info")
+
+def evaluate(cfg: DictConfig):
+ # set random seed for reproducibility
+ ppsci.utils.misc.set_random_seed(cfg.seed)
+ # initialize logger
+ logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
+
+ # set model
+ model = ppsci.arch.MLP(**cfg.MODEL)
+
+ # set validator
+ eval_data_dict = reader.load_csv_file(
+ cfg.EVAL_CSV_PATH,
+ ("x", "y", "z", "u", "v", "w", "p"),
+ {
+ "x": "Points:0",
+ "y": "Points:1",
+ "z": "Points:2",
+ "u": "U:0",
+ "v": "U:1",
+ "w": "U:2",
+ "p": "p",
+ },
+ )
+ input_dict = {
+ "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE,
+ "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE,
+ "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE,
+ }
+ if "area" in input_dict.keys():
+ input_dict["area"] *= cfg.SCALE**cfg.DIM
+
+ label_dict = {
+ "p": eval_data_dict["p"],
+ "u": eval_data_dict["u"],
+ "v": eval_data_dict["v"],
+ "w": eval_data_dict["w"],
+ }
+ eval_dataloader_cfg = {
+ "dataset": {
+ "name": "NamedArrayDataset",
+ "input": input_dict,
+ "label": label_dict,
+ },
+ "sampler": {"name": "BatchSampler"},
+ "num_workers": 1,
+ }
+ sup_validator = ppsci.validate.SupervisedValidator(
+ {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
+ ppsci.loss.MSELoss("mean"),
+ {
+ "p": lambda out: out["p"],
+ "u": lambda out: out["u"],
+ "v": lambda out: out["v"],
+ "w": lambda out: out["w"],
+ },
+ metric={"MSE": ppsci.metric.MSE()},
+ name="ref_u_v_w_p",
+ )
+ validator = {sup_validator.name: sup_validator}
+
+ # set visualizer
+ visualizer = {
+ "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu(
+ input_dict,
+ {
+ "p": lambda out: out["p"],
+ "u": lambda out: out["u"],
+ "v": lambda out: out["v"],
+ "w": lambda out: out["w"],
+ },
+ batch_size=cfg.EVAL.batch_size.sup_validator,
+ prefix="result_u_v_w_p",
+ ),
+ }
+
+ # initialize solver
solver = ppsci.solver.Solver(
model,
- constraint,
- OUTPUT_DIR,
- equation=equation,
- geom=geom,
+ output_dir=cfg.output_dir,
+ epochs=cfg.TRAIN.epochs,
+ iters_per_epoch=cfg.TRAIN.iters_per_epoch,
+ log_freq=cfg.log_freq,
+ seed=cfg.seed,
validator=validator,
visualizer=visualizer,
- pretrained_model_path=f"{OUTPUT_DIR}/checkpoints/latest",
- eval_with_no_grad=True,
+ pretrained_model_path=cfg.EVAL.pretrained_model_path,
+ eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
)
+ # evaluate
solver.eval()
- # visualize prediction for pretrained model(optional)
+ # visualize prediction
solver.visualize()
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="aneurysm.yaml")
+def main(cfg: DictConfig):
+ if cfg.mode == "train":
+ train(cfg)
+ elif cfg.mode == "eval":
+ evaluate(cfg)
+ else:
+ raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/aneurysm/conf/aneurysm.yaml b/examples/aneurysm/conf/aneurysm.yaml
new file mode 100644
index 000000000..a3562262d
--- /dev/null
+++ b/examples/aneurysm/conf/aneurysm.yaml
@@ -0,0 +1,97 @@
+hydra:
+ run:
+ # dynamic output directory according to running time and override name
+ dir: outputs_aneurysm/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname}
+ job:
+ name: ${mode} # name of logfile
+    chdir: false # keep current working directory unchanged
+ config:
+ override_dirname:
+ exclude_keys:
+ - TRAIN.checkpoint_path
+ - TRAIN.pretrained_model_path
+ - EVAL.pretrained_model_path
+ - mode
+ - output_dir
+ - log_freq
+ sweep:
+ # output directory for multirun
+ dir: ${hydra.run.dir}
+ subdir: ./
+
+# general settings
+mode: train # running mode: train/eval
+seed: 2023
+output_dir: ${hydra:run.dir}
+log_freq: 20
+
+# set working condition
+NU: 0.025
+SCALE: 0.4
+RHO: 1.0
+DIM: 3
+
+# set geometry file path
+INLET_STL_PATH: "./stl/aneurysm_inlet.stl"
+OUTLET_STL_PATH: "./stl/aneurysm_outlet.stl"
+NOSLIP_STL_PATH: "./stl/aneurysm_noslip.stl"
+INTEGRAL_STL_PATH: "./stl/aneurysm_integral.stl"
+INTERIOR_STL_PATH: "./stl/aneurysm_closed.stl"
+
+# inlet velocity profile
+CENTER: [-18.40381048596882, -50.285383353981196, 12.848136936899031]
+INLET_NORMAL: [0.8526, -0.428, 0.299]
+INLET_CENTER: [-4.24298030045776, 4.082857101816247, -4.637790193399717]
+INLET_VEL: 1.5
+
+# set evaluate data path
+EVAL_CSV_PATH: "./data/aneurysm_parabolicInlet_sol0.csv"
+
+# model settings
+MODEL:
+ input_keys: ["x", "y", "z"]
+ output_keys: ["u", "v", "w", "p"]
+ num_layers: 6
+ hidden_size: 512
+ activation: "silu"
+ weight_norm: true
+
+# training settings
+TRAIN:
+ epochs: 1500
+ iters_per_epoch: 1000
+ iters_integral:
+ igc_outlet: 100
+ igc_integral: 100
+ save_freq: 20
+ eval_during_train: true
+ eval_freq: 20
+ lr_scheduler:
+ epochs: ${TRAIN.epochs}
+ iters_per_epoch: ${TRAIN.iters_per_epoch}
+ learning_rate: 0.001
+ gamma: 0.95
+ decay_steps: 15000
+ by_epoch: false
+ batch_size:
+ bc_inlet: 1100
+ bc_outlet: 650
+ bc_noslip: 5200
+ pde: 6000
+ igc_outlet: 1
+ igc_integral: 1
+ integral_batch_size:
+ igc_outlet: 310
+ igc_integral: 310
+ weight:
+ igc_outlet: {"normal_dot_vec": 0.1}
+ igc_integral: {"normal_dot_vec": 0.1}
+ pretrained_model_path: null
+ checkpoint_path: null
+
+# evaluation settings
+EVAL:
+ pretrained_model_path: null
+ eval_with_no_grad: true
+ batch_size:
+    sup_validator: 4096