[Ospp] VisualDL Support Paddle PIR (#1279)
* VisualDL Adaptation for Paddle PIR

* [Doc] Add document for VisualDL Adaptation for Paddle PIR

* Dynamic graph: change model interfaces to support *.json files

* [Doc] Add document for VisualDL Adaptation for Paddle PIR

* [Doc] Add document for VisualDL Adaptation for Paddle PIR

* Update Paddle PIR Visualization.md

* Fix code style

* Fix paddle dependency
cse0001 authored Oct 14, 2024
1 parent c988ab5 commit 2c25994
Showing 16 changed files with 843 additions and 52 deletions.
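All of the demos added below drive the exporter through the same entry point. Distilled from them, the new usage looks roughly like this (a minimal sketch, not part of the diff; program_or_layer is a placeholder for either a paddle.static.Program or a paddle.nn.Layer, and the logdir is illustrative):

import paddle
from visualdl import LogWriter

with LogWriter(logdir="./log/pir_demo/") as writer:
    writer.add_graph(
        model=program_or_layer,  # a static Program or a dynamic-graph Layer
        input_spec=[paddle.static.InputSpec([-1, 1, 28, 28], 'float32')],
        verbose=True,
        is_pir=True)  # new switch: export the graph in PIR form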
69 changes: 69 additions & 0 deletions demo/components/cond_inside_cond_test.py
@@ -0,0 +1,69 @@
# Copyright (c) 2024 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import paddle
from visualdl import LogWriter
"""
pseudocode:
for i in range(1, 10):
a = 2 * i
if i < 5:
if i >= 3:
return a + a
else:
return a - a
else:
if i < 8:
return a * a
else:
return a / a
"""
paddle.enable_static()


def less_than_branch(i, a):
    return paddle.static.nn.cond(
        i >= 3.0,
        lambda: paddle.add(a, a),
        lambda: paddle.subtract(a, a),
    )


def greater_equal_branch(i, a):
    return paddle.static.nn.cond(
        i < 8.0,
        lambda: paddle.multiply(a, a),
        lambda: paddle.divide(a, a),
    )


main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
    i = paddle.static.data(name="i", shape=[1], dtype='float32')
    i.stop_gradient = False
    a = 2.0 * i
    out = paddle.static.nn.cond(
        i < 5.0,
        lambda: less_than_branch(i, a),
        lambda: greater_equal_branch(i, a),
    )
    mean = paddle.mean(out)

with LogWriter(logdir="./log/cond_inside_cond_test/") as writer:
    writer.add_graph(
        model=main_program,
        input_spec=[paddle.static.InputSpec([1], dtype='float32')],
        verbose=True,
        is_pir=True)
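To inspect the exported graph, the VisualDL board can be pointed at the log directory in the usual way (port number illustrative); the nested cond branches should then be visible on the graph page:

visualdl --logdir ./log/cond_inside_cond_test/ --port 8040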
62 changes: 62 additions & 0 deletions demo/components/cond_test.py
@@ -0,0 +1,62 @@
# Copyright (c) 2024 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import paddle
from visualdl import LogWriter

paddle.enable_static()
"""
pseudocode:
for i in range(1, 10):
a = 2 * i
if i < 5:
return a + a
else:
return a - a
"""


class ConditionalLayer(paddle.nn.Layer):
    # Seemingly intended for a dynamic-graph variant of this demo;
    # unused by the static-graph export below.
    def __init__(self):
        super(ConditionalLayer, self).__init__()

    def forward(self, i):
        a = 2.0 * i
        out = paddle.static.nn.cond(
            i < 5.0,
            lambda: paddle.add(a, a),
            lambda: paddle.subtract(a, a),
        )
        return out


main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
    i = paddle.static.data(name="i", shape=[1], dtype='float32')
    i.stop_gradient = False
    a = 2.0 * i
    out = paddle.static.nn.cond(
        i < 5.0,
        lambda: paddle.add(a, a),
        lambda: paddle.subtract(a, a),
    )
    mean = paddle.mean(out)

with LogWriter(logdir="./log/cond_test/") as writer:
    writer.add_graph(
        model=main_program,
        input_spec=[paddle.static.InputSpec([1], 'float32')],
        verbose=True,
        is_pir=True)
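ConditionalLayer is defined but never used by the static-graph demo above. Presumably it could be exported through the dynamic-graph path as well, in the same way pir_graph_test.py below does (a sketch, logdir illustrative; run without paddle.enable_static() so the layer is traced from dynamic-graph mode):

layer = ConditionalLayer()
with LogWriter(logdir="./log/cond_test_dygraph/") as writer:
    writer.add_graph(
        model=layer,
        input_spec=[paddle.static.InputSpec([1], 'float32')],
        verbose=True,
        is_pir=True)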
54 changes: 54 additions & 0 deletions demo/components/pir_graph_test.py
@@ -0,0 +1,54 @@
# Copyright (c) 2024 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import paddle
import paddle.nn.functional as F
from paddle import nn
from visualdl import LogWriter


class MyNet(nn.Layer):
    def __init__(self):
        super(MyNet, self).__init__()
        self.conv1 = nn.Conv2D(
            in_channels=1, out_channels=20, kernel_size=5, stride=1, padding=2)
        self.max_pool1 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2D(
            in_channels=20,
            out_channels=20,
            kernel_size=5,
            stride=1,
            padding=2)
        self.max_pool2 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.fc = nn.Linear(in_features=980, out_features=10)

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = F.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.max_pool2(x)
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc(x)
        return x


net = MyNet()
with LogWriter(logdir="./log/pir_graph_test/") as writer:
writer.add_graph(
model=net,
input_spec=[paddle.static.InputSpec([-1, 1, 28, 28], 'float32')],
verbose=True,
is_pir=True)
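As a quick sanity check on the 980-unit linear layer: the two stride-2 pools reduce 28x28 to 7x7, and 20 channels * 7 * 7 = 980. An eager-mode run (illustrative, not part of the diff) confirms the end-to-end shapes:

net = MyNet()
x = paddle.randn([4, 1, 28, 28], dtype='float32')
print(net(x).shape)  # expected: [4, 10]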
demo/components/program_test.py
@@ -1,6 +1,18 @@
+# Copyright (c) 2024 VisualDL Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =======================================================================
 import paddle
-from paddle import ir
-
 from visualdl import LogWriter
 
 paddle.enable_static()
@@ -18,11 +30,9 @@
 batch_norm = paddle.nn.BatchNorm(32, act='relu', data_layout='NHWC')
 out = batch_norm(conv2d(tanh_out))
 
-newir_program = ir.translate_to_new_ir(main_program.desc)
-
 with LogWriter(logdir="./log/program_test/") as writer:
     writer.add_graph(
-        model=newir_program,
+        model=main_program,
         input_spec=[paddle.static.InputSpec([-1, 1, 28, 28], 'float32')],
         verbose=True,
         is_pir=True)
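Net effect of this diff: with is_pir=True, add_graph accepts the legacy paddle.static.Program directly, so the explicit ir.translate_to_new_ir pre-translation step (and the paddle.ir import it required) is no longer needed; presumably the translation now happens inside add_graph.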
49 changes: 49 additions & 0 deletions demo/components/while_test.py
@@ -0,0 +1,49 @@
# Copyright (c) 2024 VisualDL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
import paddle
from visualdl import LogWriter

paddle.enable_static()
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
    linear = paddle.nn.Linear(16, 10)

    def cond(i, loop_len, x, result):
        return i < loop_len

    def body(i, loop_len, x, result):
        result = linear(x)
        paddle.increment(i)
        return [i, loop_len, x, result]

    x = paddle.static.data(name='x', shape=[32, 16], dtype='float32')
    i = paddle.zeros(shape=[1], dtype='int64')
    loop_len = paddle.ones(shape=[1], dtype='int64')
    result = paddle.zeros(
        shape=x.shape[:-1] + linear.weight.shape[-1:], dtype="float32"
    )
    result.stop_gradient = False
    _, _, _, results = paddle.static.nn.while_loop(
        cond, body, [i, loop_len, x, result]
    )
    loss = paddle.mean(results)

with LogWriter(logdir="./log/while_test/") as writer:
writer.add_graph(
model=main_program,
input_spec=[paddle.static.InputSpec([1], 'float32')],
verbose=True,
is_pir=True)