import torch
import torch.nn as nn
import numpy as np
import os

pwd = os.path.dirname(__file__)
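
# Each TestCase_* class below builds a minimal nn.Module and exports it to an
# ONNX file (opset 11) next to this script. Judging by the class names, every
# case appears to target a single graph-optimization pass (a fusion,
# elimination, replacement, or defusion) of the tool these fixtures serve.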


class TestCaseExample(nn.Module):
    def __init__(self):
        super(TestCaseExample, self).__init__()

        # add 128 conv
        self.conv_128 = nn.Conv2d(4, 4, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=4)

        # weight
        w_128 = torch.tensor(np.array([1.0]*4).reshape(4,1,1,1)).float()
        b_128 = torch.tensor(np.array([128]*4)).float()
        self.conv_128.weight.data = w_128
        self.conv_128.bias.data = b_128

    def forward(self, x):
        x = self.conv_128(x)
        return x


model = TestCaseExample()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 4, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCaseExample')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
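

# The model/export boilerplate above repeats verbatim for every test case. A
# small helper like this sketch could capture the shared pattern; the name
# export_testcase is ours, not part of the original suite, and the cases
# below keep their inline copies so each stays self-contained.
def export_testcase(model, dummy_input, name):
    model = model.to(torch.device('cpu'))
    model.eval()
    torch.onnx.export(
        model,
        (dummy_input,),
        pwd + "/{}.onnx".format(name),
        verbose=False,
        keep_initializers_as_inputs=True,
        opset_version=11)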


class TestCase_fuse_Gemm_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase_fuse_Gemm_into_Gemm_handler, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        # x = torch.matmul(x, self.weight)
        # x = torch.matmul(x, self.weight2)

        x = self.linear(x)
        x = self.linear2(x)

        return x


model = TestCase_fuse_Gemm_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase_fuse_Gemm_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
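

# TestCase2 presumably checks that Gemm-into-Gemm fusion keeps the
# intermediate value alive when the first Gemm has a second consumer:
# y = x - 128 branches off between the two Linear layers.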
class TestCase2_fuse_Gemm_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase2_fuse_Gemm_into_Gemm_handler, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        # x = torch.matmul(x, self.weight)
        # x = torch.matmul(x, self.weight2)

        x = self.linear(x)
        y = x - 128
        x = self.linear2(x)

        return x, y


model = TestCase2_fuse_Gemm_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase2_fuse_Gemm_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase3_split(nn.Module):
    def __init__(self):
        super(TestCase3_split, self).__init__()

        # add 128 conv
        self.conv_128 = nn.Conv2d(4, 4, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=4)

        # weight
        w_128 = torch.tensor(np.array([1.0]*4).reshape(4,1,1,1)).float()
        b_128 = torch.tensor(np.array([128]*4)).float()
        self.conv_128.weight.data = w_128
        self.conv_128.bias.data = b_128

    def forward(self, x):
        x = self.conv_128(x)
        a, b = torch.split(x, [1,3], dim=1)
        b = b + 1

        return torch.cat((a,b), 1)


model = TestCase3_split()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 4, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase3_split')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase4_three_conv(nn.Module):
    def __init__(self):
        super(TestCase4_three_conv, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.conv2 = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)
        self.conv2.weight.data = w
        self.conv2.bias.data = b

        self.conv3 = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)
        self.conv3.weight.data = w
        self.conv3.bias.data = b

    def forward(self, x):
        x = self.conv(x)
        y = self.conv2(x)
        z = self.conv3(x)  # conv3, not conv2 again, so the exported graph really has three convs

        return y, z


model = TestCase4_three_conv()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase4_three_conv')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase5_fuse_Gemm_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase5_fuse_Gemm_into_Gemm_handler, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))
        self.weight3 = torch.rand((10,10))
        self.weight4 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear3 = nn.Linear(10,10)
        self.linear4 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2
        self.linear3.weight.data = self.weight3
        self.linear4.weight.data = self.weight4

    def forward(self, x):
        # x = torch.matmul(x, self.weight)
        # x = torch.matmul(x, self.weight2)

        x = self.linear(x)
        x = x - 128
        x = self.linear2(x)
        x = self.linear3(x)
        x = self.linear4(x)

        return x


model = TestCase5_fuse_Gemm_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase5_fuse_Gemm_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
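

# TestCase6: two constant Adds follow the conv; a fuse_Add_into_Conv pass
# should presumably fold both per-channel constants into the conv bias.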
class TestCase6_fuse_Add_into_Conv_handler(nn.Module):
    def __init__(self):
        super(TestCase6_fuse_Add_into_Conv_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x = self.conv(x)
        x = torch.add(x, torch.tensor(np.array([1,2,3]).reshape(1,3,1,1)))
        x = torch.add(x, torch.tensor(np.array([4,5,6]).reshape(1,3,1,1)))

        return x


model = TestCase6_fuse_Add_into_Conv_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase6_fuse_Add_into_Conv_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase7_fuse_Bn_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase7_fuse_Bn_into_Gemm_handler, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

        self.bn = nn.BatchNorm1d(10)
        self.bn.weight.data = torch.tensor(np.array([0.1]*10).astype(np.float32))
        self.bn.bias.data = torch.tensor(np.array([0.2]*10).astype(np.float32))
        self.bn.running_mean.data = torch.tensor(np.array([0.3]*10).astype(np.float32))
        self.bn.running_var.data = torch.tensor(np.array([0.4]*10).astype(np.float32))

        self.bn2 = nn.BatchNorm1d(10)
        self.bn2.weight.data = torch.tensor(np.array([0.5]*10).astype(np.float32))
        self.bn2.bias.data = torch.tensor(np.array([0.6]*10).astype(np.float32))
        self.bn2.running_mean.data = torch.tensor(np.array([0.7]*10).astype(np.float32))
        self.bn2.running_var.data = torch.tensor(np.array([0.8]*10).astype(np.float32))

    def forward(self, x):
        x = self.linear(x)
        x = self.bn(x)
        x = self.linear2(x)
        x = self.bn2(x)

        return x


model = TestCase7_fuse_Bn_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase7_fuse_Bn_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
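

# TestCase8: a raw MatMul followed by an Add on a 2-D input; a
# fuse_MatMul_and_Add_into_Gemm pass should presumably rewrite such pairs as
# a single Gemm. The extra consumers tmp1/tmp2 keep the intermediates alive.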
class TestCase8_fuse_MatMul_and_Add_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase8_fuse_MatMul_and_Add_into_Gemm_handler, self).__init__()

    def forward(self, x):
        x = torch.matmul(x, torch.tensor(np.array([0.2]*100).reshape(10,10)).float())
        x += 128

        tmp1 = x*3
        tmp2 = x*4
        tmp2 = torch.matmul(tmp2, torch.tensor(np.array([0.2]*100).reshape(10,10)).float())
        tmp2 = torch.add(tmp2, torch.tensor(np.array([0.1]*10).reshape(10)).float())

        return x, tmp1, tmp2


model = TestCase8_fuse_MatMul_and_Add_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase8_fuse_MatMul_and_Add_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
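

# TestCase9 is the rank-4 counterpart of TestCase8: MatMul on 4-D operands
# cannot be expressed as an ONNX Gemm (which is strictly 2-D), so the fusion
# pass presumably has to leave these MatMul/Add pairs alone.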
class TestCase9_fuse_MatMul_and_Add_into_Gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase9_fuse_MatMul_and_Add_into_Gemm_handler, self).__init__()

    def forward(self, x):
        x = torch.matmul(x, torch.tensor(np.array([0.2]*100).reshape(1,1,10,10)).float())
        x += 128

        tmp1 = x*3
        tmp2 = x*4
        tmp2 = torch.matmul(tmp2, torch.tensor(np.array([0.2]*100).reshape(10,10)).float())
        tmp2 = torch.add(tmp2, torch.tensor(np.array([0.1]*10).reshape(10)).float())

        return x, tmp1, tmp2


model = TestCase9_fuse_MatMul_and_Add_into_Gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 1, 1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase9_fuse_MatMul_and_Add_into_Gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase10_fuse_Mul_and_Add_into_Bn_handler(nn.Module):
    def __init__(self):
        super(TestCase10_fuse_Mul_and_Add_into_Bn_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x += 128
        y = self.conv(x)
        y = torch.add(y, torch.tensor(np.array([1,2,3]).reshape(1,3,1,1)))
        y = torch.add(y, torch.tensor(np.array([4,5,6]).reshape(1,3,1,1)))

        z = torch.mul(x, torch.tensor(np.array([1,2,3]).reshape(1,3,1,1)))
        z = torch.add(z, torch.tensor(np.array([1,2,3]).reshape(1,3,1,1)))

        x *= 0.2
        x = torch.add(x, torch.tensor(np.array([1,2,3]).reshape(1,3,1,1)))
        x = torch.add(x, torch.tensor(np.array([4,5,6]).reshape(1,3,1,1)))

        return x, y, z


model = TestCase10_fuse_Mul_and_Add_into_Bn_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase10_fuse_Mul_and_Add_into_Bn_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase11_fuse_consecutive_transposes_handler(nn.Module):
    def __init__(self):
        super(TestCase11_fuse_consecutive_transposes_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        # use onnx-modifier to gen consecutive transpose
        x = self.conv(x)
        x = x.transpose(3,2)

        return x


model = TestCase11_fuse_consecutive_transposes_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase11_fuse_consecutive_transposes_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase12_eliminate_consecutive_Cast_handler(nn.Module):
    def __init__(self):
        super(TestCase12_eliminate_consecutive_Cast_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        # use onnx-modifier to gen consecutive cast
        x = self.conv(x)

        y = x + 128
        y = y.type(torch.float32)
        y = y.type(torch.int64)

        x = x.type(torch.int64)
        x = x.type(torch.float32)
        x += 128

        return x, y


model = TestCase12_eliminate_consecutive_Cast_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase12_eliminate_consecutive_Cast_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
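

# TestCase13: chains of reshape-like nodes (Reshape, Squeeze, Flatten); the
# eliminate pass should presumably collapse each chain into a single node.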
class TestCase13_eliminate_consecutive_reshape_like_nodes_handler(nn.Module):
    def __init__(self):
        super(TestCase13_eliminate_consecutive_reshape_like_nodes_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.dropout = nn.Dropout(p=0.2)
        self.dropout2 = nn.Dropout(p=0.3)

    def forward(self, x):
        x = self.conv(x)

        y = x + 128
        y = torch.reshape(y, (1, 600, 3, 400))
        y = torch.reshape(y, (1, 3, 600, 400))
        y1 = y + 127
        y2 = y + 126

        z = x + 125
        # z = torch.reshape(z, (1, 1, 1200, 600))
        z = torch.squeeze(z, 0)
        z = torch.reshape(z, (3, 600, 400))

        # Dropout is optimized away during ONNX export; add the Dropout nodes
        # with onnx-modifier instead.
        # x = self.dropout(x)
        # x = self.dropout2(x)

        x += 124
        x = torch.reshape(x, (1, 720000))
        x = torch.flatten(x, start_dim=1)

        return x, y1, y2, z


model = TestCase13_eliminate_consecutive_reshape_like_nodes_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase13_eliminate_consecutive_reshape_like_nodes_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase14_nop_Maxpool_and_AveragePool_handler(nn.Module):
    def __init__(self):
        super(TestCase14_nop_Maxpool_and_AveragePool_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.maxpool = nn.MaxPool2d((1, 1), stride=(1, 1))
        self.avgpool = nn.AvgPool2d((1, 1), stride=(1, 1))

    def forward(self, x):
        x = self.conv(x)

        y = x + 128
        z = x + 128
        y = self.maxpool(y)
        z = self.avgpool(z)
        y1 = y + 128
        z1 = z + 128

        return y1, z1


model = TestCase14_nop_Maxpool_and_AveragePool_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase14_nop_Maxpool_and_AveragePool_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
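

# TestCase15: Expand nodes feeding broadcast-capable ops (Mul) are redundant
# and can presumably be removed by the eliminate pass. Note the early
# `return x` below: the rest of forward() is unreachable, apparently disabled
# because of a duplicate-name issue from the Expand shape constant
# (TestCase18 exercises the full variant).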
class TestCase15_eliminate_Expand_before_by_broadcast_nodes_handler(nn.Module):
    def __init__(self):
        super(TestCase15_eliminate_Expand_before_by_broadcast_nodes_handler, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

        self.bn = nn.BatchNorm1d(10)
        self.bn.weight.data = torch.tensor(np.array([0.1]*10).astype(np.float32))
        self.bn.bias.data = torch.tensor(np.array([0.2]*10).astype(np.float32))
        self.bn.running_mean.data = torch.tensor(np.array([0.3]*10).astype(np.float32))
        self.bn.running_var.data = torch.tensor(np.array([0.4]*10).astype(np.float32))

        self.bn2 = nn.BatchNorm1d(10)
        self.bn2.weight.data = torch.tensor(np.array([0.5]*10).astype(np.float32))
        self.bn2.bias.data = torch.tensor(np.array([0.6]*10).astype(np.float32))
        self.bn2.running_mean.data = torch.tensor(np.array([0.7]*10).astype(np.float32))
        self.bn2.running_var.data = torch.tensor(np.array([0.8]*10).astype(np.float32))

    def forward(self, x):
        x = self.linear(x)
        x = self.bn(x)
        x = self.linear2(x)
        x = self.bn2(x)
        y = x + 126
        z = x + 127

        x = x.expand(1,1,1,10)
        x = torch.mul(x, torch.tensor(np.array([0.7]).reshape(1,1,1,1).astype(np.float32)))
        x = x.expand(1,1,1,10)
        x = torch.reshape(x, (1,2,5,1))
        x += 129

        return x

        # Disabled (was unreachable dead code after the return above):
        # duplicate name issue from the expand shape const node; see
        # TestCase18 for the enabled variant.
        # y1 = y + 121
        # y = y.expand(1,1,1,10)
        # y = torch.mul(y, y1)
        #
        # z1 = z + 122
        # z = z.expand(1,1,1,10)
        # z2 = torch.reshape(z, (1, 2, 5, 1))
        # z = torch.mul(z, z1)
        #
        # return x, y, z, z2


model = TestCase15_eliminate_Expand_before_by_broadcast_nodes_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase15_eliminate_Expand_before_by_broadcast_nodes_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase16_fuse_mul_and_add_into_gemm_handler(nn.Module):
    def __init__(self):
        super(TestCase16_fuse_mul_and_add_into_gemm_handler, self).__init__()

    def forward(self, x):
        x = torch.matmul(x, torch.tensor(np.array([0.2]*10).reshape(10,1)).float())
        x *= 0.7
        x += 128
        y = x + 127
        z = x + 126

        return y, z


model = TestCase16_fuse_mul_and_add_into_gemm_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase16_fuse_mul_and_add_into_gemm_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase17_fuse_consecutive_reducemean_handler(nn.Module):
    def __init__(self):
        super(TestCase17_fuse_consecutive_reducemean_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x = self.conv(x)

        y = torch.mean(x, dim=3, keepdim=True)
        y = torch.mean(y, dim=2, keepdim=True)
        y = torch.mean(y, dim=1, keepdim=True)

        z = torch.mean(x, dim=3)
        z = torch.mean(z, dim=2)
        z1 = z + 111

        x = torch.mean(x, dim=3)
        x = torch.mean(x, dim=2)
        x = torch.mean(x, dim=1)

        x += 128

        return x, y, z, z1


model = TestCase17_fuse_consecutive_reducemean_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase17_fuse_consecutive_reducemean_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase18_shared_weight_issue(nn.Module):
    def __init__(self):
        super(TestCase18_shared_weight_issue, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

        self.bn = nn.BatchNorm1d(10)
        self.bn.weight.data = torch.tensor(np.array([0.1]*10).astype(np.float32))
        self.bn.bias.data = torch.tensor(np.array([0.2]*10).astype(np.float32))
        self.bn.running_mean.data = torch.tensor(np.array([0.3]*10).astype(np.float32))
        self.bn.running_var.data = torch.tensor(np.array([0.4]*10).astype(np.float32))

        self.bn2 = nn.BatchNorm1d(10)
        self.bn2.weight.data = torch.tensor(np.array([0.5]*10).astype(np.float32))
        self.bn2.bias.data = torch.tensor(np.array([0.6]*10).astype(np.float32))
        self.bn2.running_mean.data = torch.tensor(np.array([0.7]*10).astype(np.float32))
        self.bn2.running_var.data = torch.tensor(np.array([0.8]*10).astype(np.float32))

    def forward(self, x):
        x = self.linear(x)
        x = self.bn(x)
        x = self.linear2(x)
        x = self.bn2(x)
        y = x + 128
        z = x + 127

        x = x.expand(1,1,1,10)
        x = torch.mul(x, torch.tensor(np.array([0.7]).reshape(1,1,1,1).astype(np.float32)))
        x = x.expand(1,1,1,10)
        x = torch.reshape(x, (1,2,5,1))
        x += 126

        y1 = y + 125
        y = y.expand(1,1,1,10)
        y = torch.mul(y, y1)

        z1 = z + 124
        z = z.expand(1,1,1,10)
        z2 = torch.reshape(z, (1, 2, 5, 1))
        z = torch.mul(z, z1)

        # the shape constant consumed by the Expand nodes is a shared weight
        return x, y, z, z2


model = TestCase18_shared_weight_issue()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase18_shared_weight_issue')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase19_shared_weight_issue2(nn.Module):
    def __init__(self):
        super(TestCase19_shared_weight_issue2, self).__init__()

        self.weight = torch.rand((10,10))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(10,10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

        self.bn = nn.BatchNorm1d(10)
        self.bn.weight.data = torch.tensor(np.array([0.1]*10).astype(np.float32))
        self.bn.bias.data = torch.tensor(np.array([0.2]*10).astype(np.float32))
        self.bn.running_mean.data = torch.tensor(np.array([0.3]*10).astype(np.float32))
        self.bn.running_var.data = torch.tensor(np.array([0.4]*10).astype(np.float32))

        self.bn2 = nn.BatchNorm1d(10)
        self.bn2.weight.data = torch.tensor(np.array([0.5]*10).astype(np.float32))
        self.bn2.bias.data = torch.tensor(np.array([0.6]*10).astype(np.float32))
        self.bn2.running_mean.data = torch.tensor(np.array([0.7]*10).astype(np.float32))
        self.bn2.running_var.data = torch.tensor(np.array([0.8]*10).astype(np.float32))

    def forward(self, x):
        x = self.linear(x)
        y = x * 128
        z = x * 128

        x = x.expand(1,1,1,10)
        x = torch.mul(x, torch.tensor(np.array([0.7]).reshape(1,1,1,1).astype(np.float32)))
        x = x.expand(1,1,1,10)
        x = torch.reshape(x, (1,2,5,1))
        x *= 128

        y1 = y * 128
        # y = y.expand(1,1,1,10)
        y = torch.mul(y, y1)

        # z1 = z * 128
        # z = z.expand(1,1,1,10)
        # z2 = torch.reshape(z, (1, 2, 5, 1))
        # z = torch.mul(z, z1)

        # the shape constant consumed by the Expand nodes is a shared weight
        return x, y, z


model = TestCase19_shared_weight_issue2()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 10, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase19_shared_weight_issue2')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase20_replace_MulDivAddSub_to_Bn(nn.Module):
    def __init__(self):
        super(TestCase20_replace_MulDivAddSub_to_Bn, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x = self.conv(x)
        mul = x * 128
        add = x + 128
        div = x / 128
        sub = x - 128

        return mul, add, div, sub


model = TestCase20_replace_MulDivAddSub_to_Bn()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase20_replace_MulDivAddSub_to_Bn')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
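

# TestCase21: x.view(x.size(0), -1) exports as Shape/Gather/Concat feeding a
# Reshape; with a fully static input shape, the replace_Shape_to_Constant
# pass can presumably fold those nodes into constants.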
class TestCase21_replace_Shape_to_Constant(nn.Module):
    def __init__(self):
        super(TestCase21_replace_Shape_to_Constant, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((1800,1800))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(1800,1800)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x


model = TestCase21_replace_Shape_to_Constant()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase21_replace_Shape_to_Constant')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase22_replace_Split_to_Reshape(nn.Module):
    def __init__(self):
        super(TestCase22_replace_Split_to_Reshape, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((10, 1800))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(1800, 10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)

        x1, x2 = torch.split(x, 2, dim=1)

        x = torch.flatten(x, start_dim=1)
        y = self.linear(x)
        y1 = torch.split(y, 2, dim=1)

        return x1, x2, y1


model = TestCase22_replace_Split_to_Reshape()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase22_replace_Split_to_Reshape')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase23_replace_Reshape_to_Flatten(nn.Module):
    def __init__(self):
        super(TestCase23_replace_Reshape_to_Flatten, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((10, 1800))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(1800, 10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)

        x = torch.reshape(x, (1, 1800))
        x = self.linear(x)

        return x


model = TestCase23_replace_Reshape_to_Flatten()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase23_replace_Reshape_to_Flatten')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
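

# TestCase24: Squeeze and Unsqueeze are reshape-like; the pass presumably
# rewrites them as explicit Reshape nodes with constant target shapes.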
class TestCase24_replace_Squeeze_like_op_with_Reshape(nn.Module):
    def __init__(self):
        super(TestCase24_replace_Squeeze_like_op_with_Reshape, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x = self.conv(x)
        x = torch.reshape(x, (1, 1, 20, 90))
        z = torch.unsqueeze(x, 1)
        x = torch.squeeze(x)

        return x, z


model = TestCase24_replace_Squeeze_like_op_with_Reshape()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase24_replace_Squeeze_like_op_with_Reshape')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase25_replace_AveragePool_with_GlobalAveragePool(nn.Module):
    def __init__(self):
        super(TestCase25_replace_AveragePool_with_GlobalAveragePool, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=1)
        self.avgpool1 = nn.AvgPool2d(5, stride=1)
        self.avgpool2 = nn.AvgPool2d((30, 20), stride=(1, 1))

    def forward(self, x):
        x = self.conv(x)
        x1 = self.avgpool1(x)
        x2 = self.avgpool2(x)

        return x1, x2


model = TestCase25_replace_AveragePool_with_GlobalAveragePool()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase25_replace_AveragePool_with_GlobalAveragePool')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase26_replace_dilated_Conv(nn.Module):
    def __init__(self):
        super(TestCase26_replace_dilated_Conv, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(5,5), stride=(1,1), padding=(0,0), groups=1, dilation=3)
        self.avgpool1 = nn.AvgPool2d(5, stride=1)
        self.avgpool2 = nn.AvgPool2d((2, 2), stride=(1, 1))

    def forward(self, x):
        x = self.conv(x)
        x1 = self.avgpool1(x)
        x2 = self.avgpool2(x)

        return x1, x2


model = TestCase26_replace_dilated_Conv()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase26_replace_dilated_Conv')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase27_replace_depthwise_1x1_Conv_with_BatchNormalization(nn.Module):
    def __init__(self):
        super(TestCase27_replace_depthwise_1x1_Conv_with_BatchNormalization, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)
        self.avgpool1 = nn.AvgPool2d(5, stride=1)
        self.avgpool2 = nn.AvgPool2d((2, 2), stride=(1, 1))

    def forward(self, x):
        x = self.conv(x)
        x1 = self.avgpool1(x)
        x2 = self.avgpool2(x)

        return x1, x2


model = TestCase27_replace_depthwise_1x1_Conv_with_BatchNormalization()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase27_replace_depthwise_1x1_Conv_with_BatchNormalization')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
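

# TestCase28: ReduceMean over the spatial axes is equivalent to
# GlobalAveragePool; the z branch first transposes to NHWC, so the pass
# presumably has to recognize the reduced axes in either layout.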
class TestCase28_replace_ReduceMean_with_GlobalAveragePool_handler(nn.Module):
    def __init__(self):
        super(TestCase28_replace_ReduceMean_with_GlobalAveragePool_handler, self).__init__()

        # add 128 conv
        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(1,1), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

    def forward(self, x):
        x = self.conv(x)

        z = x.transpose(1,2)
        z = z.transpose(2,3)
        z = torch.mean(z, dim=[1,2])
        z1 = z + 111

        x = torch.mean(x, dim=[2,3])

        x += 128

        # return x, y, z, z1
        return x, z


model = TestCase28_replace_ReduceMean_with_GlobalAveragePool_handler()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase28_replace_ReduceMean_with_GlobalAveragePool_handler')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase29_replace_Gather_to_Reshape(nn.Module):
    def __init__(self):
        super(TestCase29_replace_Gather_to_Reshape, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((10, 1800))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(1800, 10)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)
        x1 = x[0, 1, :, :]
        y = x[0, 2, :, :]

        return x1, y


model = TestCase29_replace_Gather_to_Reshape()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase29_replace_Gather_to_Reshape')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase30_defuse_Sub_with_Batchnormalization_and_Add(nn.Module):
    def __init__(self):
        super(TestCase30_defuse_Sub_with_Batchnormalization_and_Add, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((90, 20))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(20, 90)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)
        z1 = x + 0.1
        z2 = x + 0.2
        z3 = z2 - z1

        y = self.linear(x)
        k1 = y + 0.1
        k2 = y + 0.2
        k3 = k2 - k1

        k31 = k3 + 0.1
        k32 = k3 + 0.2

        z31 = z3 + 0.1
        z32 = z3 + 0.2

        return k31, k32, z31, z32


model = TestCase30_defuse_Sub_with_Batchnormalization_and_Add()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase30_defuse_Sub_with_Batchnormalization_and_Add')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)


class TestCase31_defuse_Div_with_Reciprocal_and_Mul(nn.Module):
    def __init__(self):
        super(TestCase31_defuse_Div_with_Reciprocal_and_Mul, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((90, 20))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(20, 90)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)
        z1 = x + 0.1
        z2 = x + 0.2
        z3 = z2 / z1

        y = self.linear(x)
        k1 = y + 0.1
        k2 = y + 0.2
        k3 = k2 / k1

        k31 = k3 + 0.1
        k32 = k3 + 0.2

        z31 = z3 + 0.1
        z32 = z3 + 0.2

        return k31, k32, z31, z32


model = TestCase31_defuse_Div_with_Reciprocal_and_Mul()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase31_defuse_Div_with_Reciprocal_and_Mul')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    opset_version=11)
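

# TestCase32: same graph as TestCase31, but exported with dynamic batch and
# width axes, so parts of the input shape are unknown at optimization time;
# presumably a regression test that shape-dependent passes handle non-static
# ("invalid") input shapes gracefully.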
class TestCase32_invalid_input_shape(nn.Module):
    def __init__(self):
        super(TestCase32_invalid_input_shape, self).__init__()

        self.conv = nn.Conv2d(3, 3, kernel_size=(1,1), stride=(20,20), padding=(0,0), groups=3)

        # weight
        w = torch.tensor(np.array([2.0]*3).reshape(3,1,1,1)).float()
        b = torch.tensor(np.array([128]*3)).float()
        self.conv.weight.data = w
        self.conv.bias.data = b

        self.weight = torch.rand((90, 20))
        self.weight2 = torch.rand((10,10))

        self.linear = nn.Linear(20, 90)
        self.linear2 = nn.Linear(10,10)
        self.linear.weight.data = self.weight
        self.linear2.weight.data = self.weight2

    def forward(self, x):
        x = self.conv(x)
        z1 = x + 0.1
        z2 = x + 0.2
        z3 = z2 / z1

        y = self.linear(x)
        k1 = y + 0.1
        k2 = y + 0.2
        k3 = k2 / k1

        k31 = k3 + 0.1
        k32 = k3 + 0.2

        z31 = z3 + 0.1
        z32 = z3 + 0.2

        return k31, k32, z31, z32


model = TestCase32_invalid_input_shape()
model = model.to(torch.device('cpu'))
model.eval()

dummy_input = torch.randn(1, 3, 600, 400, device="cpu")
save_path = pwd + "/{}.onnx".format('TestCase32_invalid_input_shape')
torch.onnx.export(
    model,
    (dummy_input,),
    save_path,
    verbose=False,
    keep_initializers_as_inputs=True,
    input_names=["input"],
    dynamic_axes={"input": {0: "batch", 2: "width"}},
    opset_version=11)
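

# Optional sanity check added here (not part of the original suite): load
# every exported model and run it through the ONNX checker. Assumes the
# `onnx` package is installed.
def check_exports():
    import onnx
    for fname in sorted(os.listdir(pwd or ".")):
        if fname.endswith(".onnx"):
            onnx.checker.check_model(onnx.load(os.path.join(pwd, fname)))


if __name__ == "__main__":
    check_exports()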