Commit

[Lod][fluid_ops] Remove lod_level=0 in test/ (#69665)

co63oc authored Dec 4, 2024
1 parent 31bb97d commit 251dcfc
Showing 27 changed files with 75 additions and 214 deletions.
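Every diff below makes the same mechanical change: the explicit lod_level=0 keyword is dropped from paddle.static.data, block.create_var, and block.create_parameter calls in the tests. As a minimal sketch of why this is a no-op (an assumption to verify locally: it relies on a legacy static-graph build where lod_level defaults to 0 and Variable exposes .lod_level):

    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        # Same call as in the tests below, minus the deleted keyword.
        x = paddle.static.data(name="x", shape=[128, 768], dtype="float32")
        # Assumption: the default LoD level is 0, matching the removed
        # lod_level=0 argument, so the constructed program is unchanged.
        print(x.lod_level)  # expected: 0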
8 changes: 2 additions & 6 deletions test/collective/fleet/test_dgc_optimizer.py
@@ -45,7 +45,6 @@ def check_dgc_momentum_optimizer(
         mul_x = block.create_parameter(
             dtype="float32",
             shape=[dims[0], dims[1]],
-            lod_level=0,
             name="mul.x",
             optimize_attr={'learning_rate': 1.1},
             regularizer=(
@@ -55,12 +54,11 @@ def check_dgc_momentum_optimizer(
             ),
         )
         mul_y = block.create_var(
-            dtype="float32", shape=[dims[1], dims[2]], lod_level=0, name="mul.y"
+            dtype="float32", shape=[dims[1], dims[2]], name="mul.y"
         )
         mul_out = block.create_var(
             dtype="float32",
             shape=[dims[0], dims[2]],
-            lod_level=0,
             name="mul.out",
         )
         block.append_op(
@@ -94,9 +92,7 @@ def check_dgc_momentum_optimizer(
             dgc_momentum_optimizer._optimizer.get_velocity_str
         )
 
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
         )
2 changes: 1 addition & 1 deletion test/deprecated/ir/test_ir_fc_fuse_pass_deprecated.py
@@ -31,7 +31,7 @@ class FCFusePassTest(PassTest):
     def setUp(self):
         with base.program_guard(self.main_program, self.startup_program):
             data = paddle.static.data(
-                name="data", shape=[32, 128], dtype="float32", lod_level=0
+                name="data", shape=[32, 128], dtype="float32"
             )
             tmp_0 = paddle.static.nn.fc(
                 x=data, size=128, num_flatten_dims=1, activation="relu"
@@ -27,13 +27,9 @@ def setUp(self):
         with paddle.static.program_guard(
             self.main_program, self.startup_program
         ):
-            x = paddle.static.data(
-                name="x", shape=[128, 768], dtype="float32", lod_level=0
-            )
+            x = paddle.static.data(name="x", shape=[128, 768], dtype="float32")
             bias = paddle.static.create_parameter(shape=[768], dtype='float32')
-            y = paddle.static.data(
-                name="y", shape=[128, 768], dtype="float32", lod_level=0
-            )
+            y = paddle.static.data(name="y", shape=[128, 768], dtype="float32")
             x = x + bias
             elementwise_out = x + y
             out = paddle.static.nn.layer_norm(input=elementwise_out)
@@ -63,12 +59,8 @@ def setUp(self):
         with paddle.static.program_guard(
             self.main_program, self.startup_program
         ):
-            x = paddle.static.data(
-                name="x", shape=[128, 768], dtype="float32", lod_level=0
-            )
-            y = paddle.static.data(
-                name="y", shape=[128, 768], dtype="float32", lod_level=0
-            )
+            x = paddle.static.data(name="x", shape=[128, 768], dtype="float32")
+            y = paddle.static.data(name="y", shape=[128, 768], dtype="float32")
             elementwise_out = x + y
             out = paddle.static.nn.layer_norm(input=elementwise_out)
 
8 changes: 2 additions & 6 deletions test/deprecated/ir/test_ir_skip_layernorm_pass_deprecated.py
@@ -27,12 +27,8 @@ class SkipLayerNormFusePassTest(PassTest):
     def setUp(self):
         paddle.enable_static()
         with base.program_guard(self.main_program, self.startup_program):
-            x = paddle.static.data(
-                name="x", shape=[128, 768], dtype="float32", lod_level=0
-            )
-            y = paddle.static.data(
-                name="y", shape=[128, 768], dtype="float32", lod_level=0
-            )
+            x = paddle.static.data(name="x", shape=[128, 768], dtype="float32")
+            y = paddle.static.data(name="y", shape=[128, 768], dtype="float32")
             elementwise_out = paddle.add(x=x, y=y)
             out = paddle.static.nn.layer_norm(input=elementwise_out)
 
1 change: 0 additions & 1 deletion test/deprecated/legacy_test/dist_fleet_ctr.py
@@ -79,7 +79,6 @@ def net(self, args, is_train=True, batch_size=4, lr=0.01):
             name="click",
             shape=[-1, 1],
             dtype="int64",
-            lod_level=0,
         )
 
         datas = [dnn_data, lr_data, label]
8 changes: 4 additions & 4 deletions test/deprecated/legacy_test/test_dataset.py
@@ -444,16 +444,16 @@ def test_in_memory_dataset_masterpatch1(self):
         startup_program = base.Program()
         with base.program_guard(train_program, startup_program):
             var1 = paddle.static.data(
-                name="slot1", shape=[-1, 1], dtype="int64", lod_level=0
+                name="slot1", shape=[-1, 1], dtype="int64"
             )
             var2 = paddle.static.data(
-                name="slot2", shape=[-1, 1], dtype="int64", lod_level=0
+                name="slot2", shape=[-1, 1], dtype="int64"
             )
             var3 = paddle.static.data(
-                name="slot3", shape=[-1, 1], dtype="float32", lod_level=0
+                name="slot3", shape=[-1, 1], dtype="float32"
             )
             var4 = paddle.static.data(
-                name="slot4", shape=[-1, 1], dtype="float32", lod_level=0
+                name="slot4", shape=[-1, 1], dtype="float32"
             )
             slots_vars = [var1, var2, var3, var4]
 
@@ -26,12 +26,12 @@ def net(self):
         x1 = (
             base.default_main_program()
             .global_block()
-            .create_var(dtype="float32", shape=[1], lod_level=0, name="x1")
+            .create_var(dtype="float32", shape=[1], name="x1")
         )
         x2 = (
             base.default_main_program()
             .global_block()
-            .create_var(dtype="float32", shape=[1], lod_level=0, name="x2")
+            .create_var(dtype="float32", shape=[1], name="x2")
         )
         x = paddle.add(x1, x2)
         return x
4 changes: 1 addition & 3 deletions test/deprecated/legacy_test/test_layers_deprecated.py
@@ -1400,9 +1400,7 @@ def test_simple_conv2d(self):
     def test_shuffle_batch(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = paddle.static.data(
-                name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
-            )
+            x = paddle.static.data(name='X', shape=[-1, 4, 50], dtype='float32')
             out1 = shuffle_batch(x)
             paddle.seed(1000)
             out2 = shuffle_batch(x)
98 changes: 30 additions & 68 deletions test/deprecated/legacy_test/test_optimizer_deprecated.py
@@ -37,18 +37,17 @@ def check_sgd_optimizer(optimizer_attr):
             mul_x = block.create_parameter(
                 dtype="float32",
                 shape=[5, 10],
-                lod_level=0,
                 name="mul.x",
                 optimize_attr=optimizer_attr,
             )
             mul_y = block.create_var(
-                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
+                dtype="float32", shape=[10, 8], name="mul.y"
             )
             mul_out = block.create_var(
-                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+                dtype="float32", shape=[5, 8], name="mul.out"
             )
             mean_out = block.create_var(
-                dtype="float32", shape=[1], lod_level=0, name="mean.out"
+                dtype="float32", shape=[1], name="mean.out"
             )
             block.append_op(
                 type="mul",
@@ -81,18 +80,17 @@ def check_sgd_optimizer(optimizer_attr):
             mul_x = block.create_parameter(
                 dtype="float32",
                 shape=[5, 10],
-                lod_level=0,
                 name="mul.x",
                 optimize_attr=optimizer_attr,
             )
             mul_y = block.create_var(
-                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
+                dtype="float32", shape=[10, 8], name="mul.y"
             )
             mul_out = block.create_var(
-                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+                dtype="float32", shape=[5, 8], name="mul.out"
             )
             mean_out = block.create_var(
-                dtype="float32", shape=[1], lod_level=0, name="mean.out"
+                dtype="float32", shape=[1], name="mean.out"
             )
             block.append_op(
                 type="mul",
@@ -133,15 +131,12 @@ def test_vanilla_momentum_optimizer(self):
         mul_x = block.create_parameter(
             dtype="float32",
             shape=[5, 10],
-            lod_level=0,
            name="mul.x",
             optimize_attr={'learning_rate': 1.1},
         )
-        mul_y = block.create_var(
-            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
-        )
+        mul_y = block.create_var(dtype="float32", shape=[10, 8], name="mul.y")
        mul_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+            dtype="float32", shape=[5, 8], name="mul.out"
        )
         block.append_op(
             type="mul",
@@ -153,9 +148,7 @@ def test_vanilla_momentum_optimizer(self):
         momentum_optimizer = self.MockMomentum(
             learning_rate=learning_rate, momentum=0.2
         )
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
         )
@@ -192,25 +185,20 @@ def test_nesterov_momentum_optimizer(self):
         mul_x = block.create_parameter(
             dtype="float32",
             shape=[5, 10],
-            lod_level=0,
             name="mul.x",
             optimize_attr={'learning_rate': 1.1},
         )
-        mul_y = block.create_var(
-            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
-        )
+        mul_y = block.create_var(dtype="float32", shape=[10, 8], name="mul.y")
         mul_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+            dtype="float32", shape=[5, 8], name="mul.out"
         )
         block.append_op(
             type="mul",
             inputs={"X": mul_x, "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1},
         )
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
         )
@@ -263,25 +251,20 @@ def test_adam_optimizer(self):
         mul_x = block.create_parameter(
             dtype="float32",
             shape=[5, 10],
-            lod_level=0,
             name="mul.x",
             optimize_attr={'learning_rate': 1.1},
         )
-        mul_y = block.create_var(
-            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
-        )
+        mul_y = block.create_var(dtype="float32", shape=[10, 8], name="mul.y")
         mul_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+            dtype="float32", shape=[5, 8], name="mul.out"
         )
         block.append_op(
             type="mul",
             inputs={"X": mul_x, "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1},
         )
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}
         )
@@ -321,45 +304,32 @@ def net(self, return_input=False, with_dropout=False, with_seed=False):
         program = framework.Program()
         block = program.global_block()
         mul_x = block.create_parameter(
-            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x"
+            dtype="float32", shape=[5, 10], name="mul.x"
         )
-        mul_y = block.create_var(
-            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
-        )
+        mul_y = block.create_var(dtype="float32", shape=[10, 8], name="mul.y")
         mul_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+            dtype="float32", shape=[5, 8], name="mul.out"
         )
 
         if with_dropout is True:
             mul_out_drop = block.create_var(
                 dtype="float32",
                 shape=[5, 8],
-                lod_level=0,
                 name="mul.out.dropout",
             )
             mul_out_mask = block.create_var(
-                dtype="uint8", shape=[5, 8], lod_level=0, name="mul.out.mask"
+                dtype="uint8", shape=[5, 8], name="mul.out.mask"
            )
             if with_seed is True:
                 seed_out = block.create_var(
                     dtype="int32", shape=[1], name="seed.out"
                 )
 
-        b1 = block.create_parameter(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b1"
-        )
-        b1_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b1_out"
-        )
-        b2 = block.create_parameter(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b2"
-        )
-        b2_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b2_out"
-        )
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        b1 = block.create_parameter(dtype="float32", shape=[5, 8], name="b1")
+        b1_out = block.create_var(dtype="float32", shape=[5, 8], name="b1_out")
+        b2 = block.create_parameter(dtype="float32", shape=[5, 8], name="b2")
+        b2_out = block.create_var(dtype="float32", shape=[5, 8], name="b2_out")
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mul",
             inputs={"X": mul_x, "Y": mul_y},
@@ -927,23 +897,15 @@ def net(self):
         program = framework.Program()
         block = program.global_block()
         mul_x = block.create_parameter(
-            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x"
+            dtype="float32", shape=[5, 10], name="mul.x"
        )
-        mul_y = block.create_var(
-            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y"
-        )
+        mul_y = block.create_var(dtype="float32", shape=[10, 8], name="mul.y")
         mul_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out"
+            dtype="float32", shape=[5, 8], name="mul.out"
         )
-        b1 = block.create_parameter(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b1"
-        )
-        b1_out = block.create_var(
-            dtype="float32", shape=[5, 8], lod_level=0, name="b1_out"
-        )
-        mean_out = block.create_var(
-            dtype="float32", shape=[1], lod_level=0, name="mean.out"
-        )
+        b1 = block.create_parameter(dtype="float32", shape=[5, 8], name="b1")
+        b1_out = block.create_var(dtype="float32", shape=[5, 8], name="b1_out")
+        mean_out = block.create_var(dtype="float32", shape=[1], name="mean.out")
         block.append_op(
             type="mul",
             inputs={"X": mul_x, "Y": mul_y},
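The same equivalence check applies to the block-level APIs used in the optimizer tests above. A sketch, assuming a legacy Program/Block build at this commit where create_var still accepts the lod_level keyword being removed and defaults it to 0:

    import paddle

    paddle.enable_static()
    block = paddle.static.Program().global_block()

    # One variable with the keyword the commit removes, one without.
    # (Variable names here are illustrative, not from the diff.)
    explicit = block.create_var(
        dtype="float32", shape=[10, 8], lod_level=0, name="with_kwarg"
    )
    implicit = block.create_var(dtype="float32", shape=[10, 8], name="no_kwarg")

    # Both should report LoD level 0, so dropping the keyword in the tests
    # does not change the constructed programs.
    print(explicit.lod_level, implicit.lod_level)  # expected: 0 0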
Diffs for the remaining changed files are not shown here.
