diff --git a/docs/zh/examples/brusselator3d.md b/docs/zh/examples/brusselator3d.md index 0d98dfcb4..483458953 100644 --- a/docs/zh/examples/brusselator3d.md +++ b/docs/zh/examples/brusselator3d.md @@ -6,9 +6,9 @@ ``` sh # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz + wget -P data -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz -o brusselator3d_dataset.tar + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz --create-dirs -o data/brusselator3d_dataset.npz python brusselator3d.py ``` @@ -16,9 +16,9 @@ ``` sh # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz + wget -P data -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz -o brusselator3d_dataset.tar + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/Brusselator3D/brusselator3d_dataset.npz --create-dirs -o data/brusselator3d_dataset.npz python brusselator3d.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/Brusselator3D/brusselator3d_pretrained.pdparams ``` @@ -73,9 +73,17 @@ examples/brusselator3d/conf/brusselator3d.yaml:39:40 在构建网络之前,需要根据参数设定,使用 `linespace` 明确各个维度长度,以便 LNO 网络进行 $\lambda$ 的初始化。用 PaddleScience 代码表示如下: +``` py linenums="120" +--8<-- +examples/brusselator3d/brusselator3d.py:120:128 +--8<-- +``` + +另外,如果设置模型参数中 `use_grid` 为 `True`,不需要提前处理,模型会自动生成并添加网格,如果为 `False`,则需要在处理数据时,手动为数据添加网格,然后再输入模型: + ``` py linenums="114" --8<-- -examples/brusselator3d/brusselator3d.py:114:122 +examples/brusselator3d/brusselator3d.py:114:118 --8<-- ``` @@ -103,9 
+111,9 @@ examples/brusselator3d/conf/brusselator3d.yaml:54:58 `AdamW` 优化器基于 `Adam` 优化器进行了改进,用来解决 `Adam` 优化器中 L2 正则化失效的问题。 -``` py linenums="124" +``` py linenums="130" --8<-- -examples/brusselator3d/brusselator3d.py:124:128 +examples/brusselator3d/brusselator3d.py:130:134 --8<-- ``` @@ -113,9 +121,9 @@ examples/brusselator3d/brusselator3d.py:124:128 本问题采用监督学习的方式进行训练,仅存在监督约束 `SupervisedConstraint`,代码如下: -``` py linenums="130" +``` py linenums="136" --8<-- -examples/brusselator3d/brusselator3d.py:130:156 +examples/brusselator3d/brusselator3d.py:136:160 --8<-- ``` @@ -139,9 +147,9 @@ examples/brusselator3d/brusselator3d.py:130:156 第三个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。 -``` py linenums="158" +``` py linenums="162" --8<-- -examples/brusselator3d/brusselator3d.py:158:159 +examples/brusselator3d/brusselator3d.py:162:163 --8<-- ``` @@ -149,9 +157,9 @@ examples/brusselator3d/brusselator3d.py:158:159 在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此需要构建评估器: -``` py linenums="161" +``` py linenums="165" --8<-- -examples/brusselator3d/brusselator3d.py:161:189 +examples/brusselator3d/brusselator3d.py:165:191 --8<-- ``` @@ -166,9 +174,9 @@ examples/brusselator3d/brusselator3d.py:161:189 完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。 -``` py linenums="191" +``` py linenums="193" --8<-- -examples/brusselator3d/brusselator3d.py:191:204 +examples/brusselator3d/brusselator3d.py:193:206 --8<-- ``` @@ -180,6 +188,12 @@ examples/brusselator3d/brusselator3d.py --8<-- ``` +``` py linenums="1" title="lno.py" +--8<-- +ppsci/arch/lno.py +--8<-- +``` + ## 5. 
结果展示 下面展示了在验证集上的预测结果和标签。 diff --git a/examples/brusselator3d/brusselator3d.py b/examples/brusselator3d/brusselator3d.py index 0c879e4bd..7a0b8b287 100644 --- a/examples/brusselator3d/brusselator3d.py +++ b/examples/brusselator3d/brusselator3d.py @@ -111,6 +111,12 @@ def train(cfg: DictConfig): in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + input_constraint = data_funcs.encode(in_train, in_train_mean, in_train_std) + input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_constraint = data_funcs.cat_grid(input_constraint) + input_validator = data_funcs.cat_grid(input_validator) + # set model T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ @@ -132,11 +138,7 @@ def train(cfg: DictConfig): { "dataset": { "name": "NamedArrayDataset", - "input": { - "input": data_funcs.cat_grid( - data_funcs.encode(in_train, in_train_mean, in_train_std) - ) - }, + "input": {"input": input_constraint}, "label": { "output": data_funcs.encode( label_train, label_train_mean, label_train_std @@ -163,11 +165,7 @@ def train(cfg: DictConfig): { "dataset": { "name": "NamedArrayDataset", - "input": { - "input": data_funcs.cat_grid( - data_funcs.encode(in_val, in_train_mean, in_train_std) - ) - }, + "input": {"input": input_validator}, "label": {"output": label_val}, }, "batch_size": cfg.TRAIN.batch_size, @@ -218,6 +216,10 @@ def evaluate(cfg: DictConfig): in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_validator = data_funcs.cat_grid(input_validator) + # set model T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) X = 
paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ @@ -233,11 +235,7 @@ def evaluate(cfg: DictConfig): { "dataset": { "name": "NamedArrayDataset", - "input": { - "input": data_funcs.cat_grid( - data_funcs.encode(in_val, in_train_mean, in_train_std) - ) - }, + "input": {"input": input_validator}, "label": {"output": label_val}, }, "batch_size": cfg.EVAL.batch_size, @@ -268,15 +266,10 @@ def evaluate(cfg: DictConfig): solver.eval() # visualize prediction - output_dict = model( - { - "input": paddle.to_tensor( - data_funcs.cat_grid( - data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) - ) - ) - } - ) + input_visualize = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_visualize = data_funcs.cat_grid(input_visualize) + output_dict = model({"input": paddle.to_tensor(input_visualize)}) pred = paddle.squeeze( data_funcs.decode(output_dict["output"], label_train_mean, label_train_std) ).numpy() @@ -341,9 +334,12 @@ def inference(cfg: DictConfig): label_val = data_funcs.transform(labels_val, "label") in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + input_infer = data_funcs.encode(in_val, in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_infer = data_funcs.cat_grid(input_infer) output_dict = predictor.predict( - {"input": data_funcs.encode(in_val, in_train_mean, in_train_std)}, + {"input": input_infer}, cfg.INFER.batch_size, ) diff --git a/examples/brusselator3d/conf/brusselator3d.yaml b/examples/brusselator3d/conf/brusselator3d.yaml index c9bd4883f..ee625c6cb 100644 --- a/examples/brusselator3d/conf/brusselator3d.yaml +++ b/examples/brusselator3d/conf/brusselator3d.yaml @@ -37,7 +37,7 @@ ORIG_R: 28 RESOLUTION: 2 # set data path -DATA_PATH: ./Data/Brusselator_force_train.npz +DATA_PATH: ./data/brusselator3d_dataset.npz # model settings MODEL: diff --git a/ppsci/arch/lno.py 
b/ppsci/arch/lno.py index 89df7e6ce..752a3272f 100644 --- a/ppsci/arch/lno.py +++ b/ppsci/arch/lno.py @@ -29,23 +29,24 @@ class Laplace(nn.Layer): + """Generic N-Dimensional Laplace Operator with Pole-Residue Method. + + Args: + in_channels (int): Number of input channels of the first layer. + out_channels (int): Number of output channels of the last layer. + modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. + T (paddle.Tensor): Linspace of time dimension. + data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. + """ + def __init__( self, in_channels: int, out_channels: int, modes: Tuple[int, ...], T: paddle.Tensor, - Data: Tuple[paddle.Tensor, ...], + data: Tuple[paddle.Tensor, ...], ): - """Generic N-Dimensional Laplace Operator with Pole-Residue Method. - - Args: - in_channels (int): Number of input channels of the first layer. - out_channels (int): Number of output channels of the last layer. - modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. - T (paddle.Tensor): Linspace of time dimension. - Data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. - """ super().__init__() self.char1 = "pqr" self.char2 = "mnk" @@ -73,14 +74,14 @@ def __init__( self.create_parameter(residues_shape) ) - self.initialize_lambdas(T, Data) + self.initialize_lambdas(T, data) self.get_einsum_eqs() def _init_weights(self, weight) -> paddle.Tensor: return initializer.uniform_(weight, a=0, b=self.scale) - def initialize_lambdas(self, T, Data) -> None: - self.t_lst = (T,) + Data + def initialize_lambdas(self, T, data) -> None: + self.t_lst = (T,) + data self.lambdas = [] for i in range(self.dims): t_i = self.t_lst[i] @@ -185,6 +186,22 @@ def forward(self, x): class LNO(base.Arch): + """Laplace Neural Operator net. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). 
+ output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). + width (int): Tensor width of Laplace Layer. + modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. + T (paddle.Tensor): Linspace of time dimension. + data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. + in_features (int, optional): Number of input channels of the first layer.. Defaults to 1. + hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64. + activation (str, optional): The activation function. Defaults to "sin". + use_norm (bool, optional): Whether to use normalization layers. Defaults to True. + use_grid (bool, optional): Whether to create grid. Defaults to False. + """ + def __init__( self, input_keys: Tuple[str, ...], @@ -192,28 +209,13 @@ def __init__( width: int, modes: Tuple[int, ...], T: paddle.Tensor, - Data: Optional[Tuple[paddle.Tensor, ...]] = None, + data: Optional[Tuple[paddle.Tensor, ...]] = None, in_features: int = 1, hidden_features: int = 64, activation: str = "sin", use_norm: bool = True, use_grid: bool = False, ): - """Laplace Neural Operator net. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). - width (int): Tensor width of Laplace Layer. - modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. - T (paddle.Tensor): Linspace of time dimension. - Data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. - in_features (int, optional): Number of input channels of the first layer.. Defaults to 1. - hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64. - activation (str, optional): The activation function. Defaults to "sin". - use_norm (bool, optional): Whether to use normalization layers. Defaults to True. 
- use_grid (bool, optional): Whether to create grid. Defaults to False. - """ super().__init__() self.input_keys = input_keys self.output_keys = output_keys @@ -222,14 +224,14 @@ def __init__( self.dims = len(modes) assert self.dims <= 3, "Only 3 dims and lower of modes are supported now." - if Data is None: - Data = () + if data is None: + data = () assert ( - self.dims == len(Data) + 1 - ), f"Dims of modes is {self.dims} but only {len(Data)} dims(except T) of data received." + self.dims == len(data) + 1 + ), f"Dims of modes is {self.dims} but only {len(data)} dims(except T) of data received." self.fc0 = nn.Linear(in_features=in_features, out_features=self.width) - self.laplace = Laplace(self.width, self.width, self.modes, T, Data) + self.laplace = Laplace(self.width, self.width, self.modes, T, data) self.conv = getattr(nn, f"Conv{self.dims}D")( in_channels=self.width, out_channels=self.width, @@ -251,19 +253,19 @@ def __init__( def get_grid(self, shape): batchsize, size_t, size_x, size_y = shape[0], shape[1], shape[2], shape[3] - gridt = paddle.to_tensor(data=np.linspace(0, 1, size_t), dtype="float32") - gridt = gridt.reshape(1, size_t, 1, 1, 1).repeat( + gridt = paddle.linspace(0, 1, size_t) + gridt = gridt.reshape([1, size_t, 1, 1, 1]).tile( [batchsize, 1, size_x, size_y, 1] ) - gridx = paddle.to_tensor(data=np.linspace(0, 1, size_x), dtype="float32") - gridx = gridx.reshape(1, 1, size_x, 1, 1).repeat( + gridx = paddle.linspace(0, 1, size_x) + gridx = gridx.reshape([1, 1, size_x, 1, 1]).tile( [batchsize, size_t, 1, size_y, 1] ) - gridy = paddle.to_tensor(data=np.linspace(0, 1, size_y), dtype="float32") - gridy = gridy.reshape(1, 1, 1, size_y, 1).repeat( + gridy = paddle.linspace(0, 1, size_y) + gridy = gridy.reshape([1, 1, 1, size_y, 1]).tile( [batchsize, size_t, size_x, 1, 1] ) - return paddle.concat(x=(gridt, gridx, gridy), axis=-1) + return paddle.concat([gridt, gridx, gridy], axis=-1) def transpoe_to_NCDHW(self, x): perm = [0, self.dims + 1] + list(range(1, 
self.dims + 1))