[Fluid Clean] remove paddle.fluid.dygraph.nn.conv2D (PaddlePaddle#1504)
* [Fluid Clean] remove paddle.fluid.dygraph.nn.conv2D

* remove layers_old in ofa
RachelXu7 authored Nov 8, 2022
1 parent dff848b commit d2bd1d2
Showing 14 changed files with 54 additions and 1,390 deletions.
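For context on the migration pattern repeated below: the legacy paddle.fluid.dygraph.nn.Conv2D took num_channels/num_filters/filter_size, while paddle.nn.Conv2D, which these files now import, takes in_channels/out_channels/kernel_size. A minimal before/after sketch (illustrative values, not lines from this diff):

# legacy fluid dygraph API (being removed across this PR series)
# from paddle.fluid.dygraph.nn import Conv2D
# conv = Conv2D(num_channels=3, num_filters=64, filter_size=7, stride=1)

# paddle 2.x API that the new imports bind to
from paddle.nn import Conv2D
conv = Conv2D(in_channels=3, out_channels=64, kernel_size=7, stride=1)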
3 changes: 2 additions & 1 deletion demo/darts/model.py
@@ -20,7 +20,8 @@
 import paddle.fluid as fluid
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.initializer import ConstantInitializer, MSRAInitializer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm, Linear
 from paddle.fluid.dygraph.base import to_variable
 from genotypes import PRIMITIVES
 from genotypes import Genotype
18 changes: 7 additions & 11 deletions demo/darts/operations.py
@@ -13,7 +13,8 @@
 # limitations under the License.

 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.initializer import ConstantInitializer, MSRAInitializer

@@ -58,10 +59,8 @@


 def bn_param_config(affine=False):
-    gama = ParamAttr(
-        initializer=ConstantInitializer(value=1), trainable=affine)
-    beta = ParamAttr(
-        initializer=ConstantInitializer(value=0), trainable=affine)
+    gama = ParamAttr(initializer=ConstantInitializer(value=1), trainable=affine)
+    beta = ParamAttr(initializer=ConstantInitializer(value=0), trainable=affine)
     return gama, beta

@@ -107,8 +106,7 @@ def __init__(self, c_in, c_out, affine=True):
             param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
             bias_attr=False)
         gama, beta = bn_param_config(affine)
-        self.bn = BatchNorm(
-            num_channels=c_out, param_attr=gama, bias_attr=beta)
+        self.bn = BatchNorm(num_channels=c_out, param_attr=gama, bias_attr=beta)

     def forward(self, x):
         x = fluid.layers.relu(x)
@@ -140,8 +138,7 @@ def __init__(self, c_in, c_out, kernel_size, stride, padding, affine=True):
             param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
             bias_attr=False)
         gama, beta = bn_param_config(affine)
-        self.bn1 = BatchNorm(
-            num_channels=c_in, param_attr=gama, bias_attr=beta)
+        self.bn1 = BatchNorm(num_channels=c_in, param_attr=gama, bias_attr=beta)
         self.conv3 = Conv2D(
             num_channels=c_in,
             num_filters=c_in,
@@ -257,8 +254,7 @@ def __init__(self, c_in, c_out, kernel_size, stride, padding, affine=True):
             param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
             bias_attr=False)
         gama, beta = bn_param_config(affine)
-        self.bn = BatchNorm(
-            num_channels=c_out, param_attr=gama, bias_attr=beta)
+        self.bn = BatchNorm(num_channels=c_out, param_attr=gama, bias_attr=beta)

     def forward(self, x):
         x = fluid.layers.relu(x)
9 changes: 4 additions & 5 deletions demo/one_shot/train.py
@@ -21,7 +21,8 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph.nn import Pool2D, Linear
 from paddle.fluid.dygraph.base import to_variable

 from paddleslim.nas.one_shot import SuperMnasnet
@@ -142,8 +143,7 @@ def train_mnist(args, model, tokens=None):
     epoch_num = args.epoch
     BATCH_SIZE = 64

-    adam = AdamOptimizer(
-        learning_rate=0.001, parameter_list=model.parameters())
+    adam = AdamOptimizer(learning_rate=0.001, parameter_list=model.parameters())

     train_reader = paddle.fluid.io.batch(
         paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
@@ -187,8 +187,7 @@ def train_mnist(args, model, tokens=None):
             print("Loss at epoch {} , acc is: {}".format(epoch, test_acc))

         save_parameters = (not args.use_data_parallel) or (
-            args.use_data_parallel and
-            fluid.dygraph.parallel.Env().local_rank == 0)
+            args.use_data_parallel and fluid.dygraph.parallel.Env().local_rank == 0)
         if save_parameters:
             fluid.save_dygraph(model.state_dict(), "save_temp")
             print("checkpoint saved")
3 changes: 2 additions & 1 deletion paddleslim/models/dygraph/mobilenet.py
@@ -24,7 +24,8 @@
 from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm, Linear
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid import framework

9 changes: 3 additions & 6 deletions paddleslim/models/dygraph/resnet.py
@@ -15,7 +15,8 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph.nn import Pool2D, BatchNorm, Linear


 class ConvBNLayer(fluid.dygraph.Layer):
@@ -114,11 +115,7 @@ def __init__(self, layers=50, class_dim=100):
         num_filters = [64, 128, 256, 512]

         self.conv = ConvBNLayer(
-            num_channels=3,
-            num_filters=64,
-            filter_size=7,
-            stride=1,
-            act='relu')
+            num_channels=3, num_filters=64, filter_size=7, stride=1, act='relu')
         self.pool2d_max = Pool2D(
             pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

6 changes: 4 additions & 2 deletions paddleslim/nas/darts/search_space/conv_bert/model/bert.py
@@ -23,8 +23,10 @@
 import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2D
+from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer
+from paddle.fluid.dygraph import Pool2D, BatchNorm, Linear
+from paddle.fluid.dygraph import to_variable, guard
 from paddle.fluid import ParamAttr
 from paddle.fluid.initializer import MSRA
 from .transformer_encoder import EncoderLayer
(changes to a further file; file name not captured on this page)
@@ -22,8 +22,9 @@

 import paddle
 import paddle.fluid as fluid
+from paddle.nn import Conv2D
 from paddle.fluid.dygraph import Embedding, LayerNorm, Linear
-from paddle.fluid.dygraph import Conv2D, BatchNorm, Pool2D
+from paddle.fluid.dygraph import BatchNorm, Pool2D
 from paddle.fluid.dygraph import Layer
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.initializer import NormalInitializer
8 changes: 1 addition & 7 deletions paddleslim/nas/ofa/__init__.py
@@ -16,10 +16,4 @@
 from .convert_super import supernet
 from .utils.special_config import *
 from .get_sub_model import *
-
-from .utils.utils import get_paddle_version
-pd_ver = get_paddle_version()
-if pd_ver == 185:
-    from .layers_old import *
-else:
-    from .layers import *
+from .layers import *
21 changes: 6 additions & 15 deletions paddleslim/nas/ofa/convert_super.py
@@ -18,24 +18,15 @@
 import numbers
 import paddle
 from ...common import get_logger
+import paddle.nn as nn
+from paddle.nn import Conv2D, Conv2DTranspose, Linear, LayerNorm, Embedding, SyncBatchNorm
+from paddle import ParamAttr
 from .utils.utils import get_paddle_version
 pd_ver = get_paddle_version()
-if pd_ver == 185:
-    import paddle.fluid.dygraph.nn as nn
-    from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, Linear, LayerNorm, Embedding
-    from paddle.fluid import ParamAttr
-    from .layers_old import *
-    from . import layers_old as layers
-    Layer = paddle.fluid.dygraph.Layer
-else:
-    import paddle.nn as nn
-    from paddle.nn import Conv2D, Conv2DTranspose, Linear, LayerNorm, Embedding, SyncBatchNorm
-    from paddle import ParamAttr
-    from .layers import *
-    from . import layers
-    Layer = paddle.nn.Layer
+from .layers import *
+from . import layers
+from paddle.nn import Layer
 from .layers_base import Block
-from . import layers_old
 _logger = get_logger(__name__, level=logging.INFO)

 __all__ = ['supernet', 'Convert']
12 changes: 6 additions & 6 deletions paddleslim/nas/ofa/layers.py
@@ -994,19 +994,19 @@ def forward(self, input):
         if in_dygraph_mode():
             if feature_dim != self._mean.shape[0]:
                 batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
-                    input, weight, bias, mean, variance, self._momentum,
-                    self._epsilon, self._data_format, not self.training,
-                    self._use_global_stats, trainable_statistics, False, False)
+                    input, mean, variance, weight, bias, not self.training,
+                    self._momentum, self._epsilon, self._data_format,
+                    self._use_global_stats, trainable_statistics)
                 self._mean[:feature_dim].set_value(mean)
                 self._variance[:feature_dim].set_value(variance)
                 mean_out[:feature_dim].set_value(mean_out_tmp)
                 variance_out[:feature_dim].set_value(variance_out_tmp)
                 return batch_norm_out
             else:
                 batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
-                    input, weight, bias, mean, variance, self._momentum,
-                    self._epsilon, self._data_format, not self.training,
-                    self._use_global_stats, trainable_statistics, False)
+                    input, mean, variance, weight, bias, not self.training,
+                    self._momentum, self._epsilon, self._data_format,
+                    self._use_global_stats, trainable_statistics)
                 return batch_norm_out

         elif _in_legacy_dygraph():
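The layers.py edit above is purely an argument reorder of the _C_ops.batch_norm call; read side by side (argument roles taken from the removed and added lines themselves, not from Paddle internals):

# removed order:
#   _C_ops.batch_norm(input, weight, bias, mean, variance,
#                     momentum, epsilon, data_format, is_test,
#                     use_global_stats, trainable_statistics, <extra flags>)
# added order:
#   _C_ops.batch_norm(input, mean, variance, weight, bias, is_test,
#                     momentum, epsilon, data_format,
#                     use_global_stats, trainable_statistics)
# where is_test = not self.training; the trailing False flag(s) are dropped.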
(4 more changed files not shown)

0 comments on commit d2bd1d2
