Skip to content

Commit

Permalink
[Fix]Fix code and doc of example brusselator3d; Fix lno arch; (Paddle…
Browse files Browse the repository at this point in the history
…Paddle#992)

* [Fix]Fix example and doc of brusselator3d; Fix lno arch;

* fix variables name of lno arch

* fix data path

---------

Co-authored-by: HydrogenSulfate <[email protected]>
  • Loading branch information
lijialin03 and HydrogenSulfate authored Sep 25, 2024
1 parent 729a6af commit 7f30985
Show file tree
Hide file tree
Showing 4 changed files with 94 additions and 82 deletions.
44 changes: 29 additions & 15 deletions docs/zh/examples/brusselator3d.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,19 +6,19 @@

``` sh
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz
wget -P data -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz -o brusselator3d_dataset.tar
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz --create-dirs -o data/brusselator3d_dataset.npz
python brusselator3d.py
```

=== "模型评估命令"

``` sh
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz
wget -P data -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz -o brusselator3d_dataset.tar
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz --create-dirs -o data/brusselator3d_dataset.npz
python brusselator3d.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/Brusselator3D/brusselator3d_pretrained.pdparams
```

Expand Down Expand Up @@ -73,9 +73,17 @@ examples/brusselator3d/conf/brusselator3d.yaml:39:40

在构建网络之前,需要根据参数设定,使用 `linspace` 明确各个维度长度,以便 LNO 网络进行 $\lambda$ 的初始化。用 PaddleScience 代码表示如下:

``` py linenums="120"
--8<--
examples/brusselator3d/brusselator3d.py:120:128
--8<--
```

另外,如果设置模型参数中 `use_grid` 为 `True`,不需要提前处理,模型会自动生成并添加网格,如果为 `False`,则需要在处理数据时,手动为数据添加网格,然后再输入模型:

``` py linenums="114"
--8<--
examples/brusselator3d/brusselator3d.py:114:122
examples/brusselator3d/brusselator3d.py:114:118
--8<--
```

Expand Down Expand Up @@ -103,19 +111,19 @@ examples/brusselator3d/conf/brusselator3d.yaml:54:58

`AdamW` 优化器基于 `Adam` 优化器进行了改进,用来解决 `Adam` 优化器中 L2 正则化失效的问题。

``` py linenums="124"
``` py linenums="130"
--8<--
examples/brusselator3d/brusselator3d.py:124:128
examples/brusselator3d/brusselator3d.py:130:134
--8<--
```

### 3.5 约束构建

本问题采用监督学习的方式进行训练,仅存在监督约束 `SupervisedConstraint`,代码如下:

``` py linenums="130"
``` py linenums="136"
--8<--
examples/brusselator3d/brusselator3d.py:130:156
examples/brusselator3d/brusselator3d.py:136:160
--8<--
```

Expand All @@ -139,19 +147,19 @@ examples/brusselator3d/brusselator3d.py:130:156

第三个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。

``` py linenums="158"
``` py linenums="162"
--8<--
examples/brusselator3d/brusselator3d.py:158:159
examples/brusselator3d/brusselator3d.py:162:163
--8<--
```

### 3.6 评估器构建

在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此需要构建评估器:

``` py linenums="161"
``` py linenums="165"
--8<--
examples/brusselator3d/brusselator3d.py:161:189
examples/brusselator3d/brusselator3d.py:165:191
--8<--
```

Expand All @@ -166,9 +174,9 @@ examples/brusselator3d/brusselator3d.py:161:189

完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。

``` py linenums="191"
``` py linenums="193"
--8<--
examples/brusselator3d/brusselator3d.py:191:204
examples/brusselator3d/brusselator3d.py:193:206
--8<--
```

Expand All @@ -180,6 +188,12 @@ examples/brusselator3d/brusselator3d.py
--8<--
```

``` py linenums="1" title="lno.py"
--8<--
ppsci/arch/lno.py
--8<--
```

## 5. 结果展示

下面展示了在验证集上的预测结果和标签。
Expand Down
46 changes: 21 additions & 25 deletions examples/brusselator3d/brusselator3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,12 @@ def train(cfg: DictConfig):
in_train_mean, in_train_std = data_funcs.get_mean_std(in_train)
label_train_mean, label_train_std = data_funcs.get_mean_std(label_train)

input_constraint = data_funcs.encode(in_train, in_train_mean, in_train_std)
input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std)
if not cfg.MODEL.use_grid:
input_constraint = data_funcs.cat_grid(input_constraint)
input_validator = data_funcs.cat_grid(input_validator)

# set model
T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T])
X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[
Expand All @@ -132,11 +138,7 @@ def train(cfg: DictConfig):
{
"dataset": {
"name": "NamedArrayDataset",
"input": {
"input": data_funcs.cat_grid(
data_funcs.encode(in_train, in_train_mean, in_train_std)
)
},
"input": {"input": input_constraint},
"label": {
"output": data_funcs.encode(
label_train, label_train_mean, label_train_std
Expand All @@ -163,11 +165,7 @@ def train(cfg: DictConfig):
{
"dataset": {
"name": "NamedArrayDataset",
"input": {
"input": data_funcs.cat_grid(
data_funcs.encode(in_val, in_train_mean, in_train_std)
)
},
"input": {"input": input_validator},
"label": {"output": label_val},
},
"batch_size": cfg.TRAIN.batch_size,
Expand Down Expand Up @@ -218,6 +216,10 @@ def evaluate(cfg: DictConfig):
in_train_mean, in_train_std = data_funcs.get_mean_std(in_train)
label_train_mean, label_train_std = data_funcs.get_mean_std(label_train)

input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std)
if not cfg.MODEL.use_grid:
input_validator = data_funcs.cat_grid(input_validator)

# set model
T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T])
X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[
Expand All @@ -233,11 +235,7 @@ def evaluate(cfg: DictConfig):
{
"dataset": {
"name": "NamedArrayDataset",
"input": {
"input": data_funcs.cat_grid(
data_funcs.encode(in_val, in_train_mean, in_train_std)
)
},
"input": {"input": input_validator},
"label": {"output": label_val},
},
"batch_size": cfg.EVAL.batch_size,
Expand Down Expand Up @@ -268,15 +266,10 @@ def evaluate(cfg: DictConfig):
solver.eval()

# visualize prediction
output_dict = model(
{
"input": paddle.to_tensor(
data_funcs.cat_grid(
data_funcs.encode(in_val[0:1], in_train_mean, in_train_std)
)
)
}
)
input_visualize = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std)
if not cfg.MODEL.use_grid:
input_visualize = data_funcs.cat_grid(input_visualize)
output_dict = model({"input": paddle.to_tensor(input_visualize)})
pred = paddle.squeeze(
data_funcs.decode(output_dict["output"], label_train_mean, label_train_std)
).numpy()
Expand Down Expand Up @@ -341,9 +334,12 @@ def inference(cfg: DictConfig):
label_val = data_funcs.transform(labels_val, "label")
in_train_mean, in_train_std = data_funcs.get_mean_std(in_train)
label_train_mean, label_train_std = data_funcs.get_mean_std(label_train)
input_infer = data_funcs.encode(in_val, in_train_mean, in_train_std)
if not cfg.MODEL.use_grid:
input_infer = data_funcs.cat_grid(input_infer)

output_dict = predictor.predict(
{"input": data_funcs.encode(in_val, in_train_mean, in_train_std)},
{"input": input_infer},
cfg.INFER.batch_size,
)

Expand Down
2 changes: 1 addition & 1 deletion examples/brusselator3d/conf/brusselator3d.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ ORIG_R: 28
RESOLUTION: 2

# set data path
DATA_PATH: ./Data/Brusselator_force_train.npz
DATA_PATH: ./data/brusselator3d_dataset.npz

# model settings
MODEL:
Expand Down
84 changes: 43 additions & 41 deletions ppsci/arch/lno.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,23 +29,24 @@


class Laplace(nn.Layer):
"""Generic N-Dimensional Laplace Operator with Pole-Residue Method.
Args:
in_channels (int): Number of input channels of the first layer.
out_channels (int): Number of output channels of the last layer.
modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training.
T (paddle.Tensor): Linspace of time dimension.
data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions.
"""

def __init__(
self,
in_channels: int,
out_channels: int,
modes: Tuple[int, ...],
T: paddle.Tensor,
Data: Tuple[paddle.Tensor, ...],
data: Tuple[paddle.Tensor, ...],
):
"""Generic N-Dimensional Laplace Operator with Pole-Residue Method.
Args:
in_channels (int): Number of input channels of the first layer.
out_channels (int): Number of output channels of the last layer.
modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training.
T (paddle.Tensor): Linspace of time dimension.
Data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions.
"""
super().__init__()
self.char1 = "pqr"
self.char2 = "mnk"
Expand Down Expand Up @@ -73,14 +74,14 @@ def __init__(
self.create_parameter(residues_shape)
)

self.initialize_lambdas(T, Data)
self.initialize_lambdas(T, data)
self.get_einsum_eqs()

def _init_weights(self, weight) -> paddle.Tensor:
return initializer.uniform_(weight, a=0, b=self.scale)

def initialize_lambdas(self, T, Data) -> None:
self.t_lst = (T,) + Data
def initialize_lambdas(self, T, data) -> None:
self.t_lst = (T,) + data
self.lambdas = []
for i in range(self.dims):
t_i = self.t_lst[i]
Expand Down Expand Up @@ -185,35 +186,36 @@ def forward(self, x):


class LNO(base.Arch):
"""Laplace Neural Operator net.
Args:
input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2").
output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2").
width (int): Tensor width of Laplace Layer.
modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training.
T (paddle.Tensor): Linspace of time dimension.
data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions.
        in_features (int, optional): Number of input channels of the first layer. Defaults to 1.
hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64.
activation (str, optional): The activation function. Defaults to "sin".
use_norm (bool, optional): Whether to use normalization layers. Defaults to True.
use_grid (bool, optional): Whether to create grid. Defaults to False.
"""

def __init__(
self,
input_keys: Tuple[str, ...],
output_keys: Tuple[str, ...],
width: int,
modes: Tuple[int, ...],
T: paddle.Tensor,
Data: Optional[Tuple[paddle.Tensor, ...]] = None,
data: Optional[Tuple[paddle.Tensor, ...]] = None,
in_features: int = 1,
hidden_features: int = 64,
activation: str = "sin",
use_norm: bool = True,
use_grid: bool = False,
):
"""Laplace Neural Operator net.
Args:
input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2").
output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2").
width (int): Tensor width of Laplace Layer.
modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training.
T (paddle.Tensor): Linspace of time dimension.
Data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions.
in_features (int, optional): Number of input channels of the first layer.. Defaults to 1.
hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64.
activation (str, optional): The activation function. Defaults to "sin".
use_norm (bool, optional): Whether to use normalization layers. Defaults to True.
use_grid (bool, optional): Whether to create grid. Defaults to False.
"""
super().__init__()
self.input_keys = input_keys
self.output_keys = output_keys
Expand All @@ -222,14 +224,14 @@ def __init__(
self.dims = len(modes)
assert self.dims <= 3, "Only 3 dims and lower of modes are supported now."

if Data is None:
Data = ()
if data is None:
data = ()
assert (
self.dims == len(Data) + 1
), f"Dims of modes is {self.dims} but only {len(Data)} dims(except T) of data received."
self.dims == len(data) + 1
), f"Dims of modes is {self.dims} but only {len(data)} dims(except T) of data received."

self.fc0 = nn.Linear(in_features=in_features, out_features=self.width)
self.laplace = Laplace(self.width, self.width, self.modes, T, Data)
self.laplace = Laplace(self.width, self.width, self.modes, T, data)
self.conv = getattr(nn, f"Conv{self.dims}D")(
in_channels=self.width,
out_channels=self.width,
Expand All @@ -251,19 +253,19 @@ def __init__(

def get_grid(self, shape):
batchsize, size_t, size_x, size_y = shape[0], shape[1], shape[2], shape[3]
gridt = paddle.to_tensor(data=np.linspace(0, 1, size_t), dtype="float32")
gridt = gridt.reshape(1, size_t, 1, 1, 1).repeat(
gridt = paddle.linspace(0, 1, size_t)
gridt = gridt.reshape([1, size_t, 1, 1, 1]).tile(
[batchsize, 1, size_x, size_y, 1]
)
gridx = paddle.to_tensor(data=np.linspace(0, 1, size_x), dtype="float32")
gridx = gridx.reshape(1, 1, size_x, 1, 1).repeat(
gridx = paddle.linspace(0, 1, size_x)
gridx = gridx.reshape([1, 1, size_x, 1, 1]).tile(
[batchsize, size_t, 1, size_y, 1]
)
gridy = paddle.to_tensor(data=np.linspace(0, 1, size_y), dtype="float32")
gridy = gridy.reshape(1, 1, 1, size_y, 1).repeat(
gridy = paddle.linspace(0, 1, size_y)
gridy = gridy.reshape([1, 1, 1, size_y, 1]).tile(
[batchsize, size_t, size_x, 1, 1]
)
return paddle.concat(x=(gridt, gridx, gridy), axis=-1)
return paddle.concat([gridt, gridx, gridy], axis=-1)

def transpoe_to_NCDHW(self, x):
perm = [0, self.dims + 1] + list(range(1, self.dims + 1))
Expand Down

0 comments on commit 7f30985

Please sign in to comment.