Skip to content

Commit

Permalink
add reshape before and after pooling 123d with no batch dimension (#3566)
Browse files Browse the repository at this point in the history
  • Loading branch information
nihui authored Feb 15, 2022
1 parent 76e32e9 commit 6b2495c
Show file tree
Hide file tree
Showing 26 changed files with 473 additions and 86 deletions.
1 change: 1 addition & 0 deletions tools/pnnx/src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,7 @@ set(pnnx_pass_ncnn_SRCS
pass_ncnn/fuse_deconvolutiondepthwise_activation.cpp
pass_ncnn/fuse_innerproduct_activation.cpp
pass_ncnn/fuse_transpose_matmul.cpp
pass_ncnn/insert_reshape_pooling.cpp

pass_ncnn/F_adaptive_avg_pool1d.cpp
pass_ncnn/F_adaptive_avg_pool2d.cpp
Expand Down
9 changes: 9 additions & 0 deletions tools/pnnx/src/ir.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2419,6 +2419,15 @@ Operator* Graph::new_operator_before(const std::string& type, const std::string&
return op;
}

// Create a new operator of the given type/name and insert it into the graph's
// operator list immediately after `cur`. The caller is responsible for wiring
// up the new operator's inputs/outputs.
Operator* Graph::new_operator_after(const std::string& type, const std::string& name, const Operator* cur)
{
    Operator* op = new Operator;
    op->type = type;
    op->name = name;

    // If `cur` is not present, std::find returns end() and incrementing it
    // would be undefined behavior — append to the end in that case.
    auto it = std::find(ops.begin(), ops.end(), cur);
    ops.insert(it == ops.end() ? it : it + 1, op);
    return op;
}

Operand* Graph::new_operand(const torch::jit::Value* v)
{
Operand* r = new Operand;
Expand Down
2 changes: 2 additions & 0 deletions tools/pnnx/src/ir.h
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,8 @@ class Graph

Operator* new_operator_before(const std::string& type, const std::string& name, const Operator* cur);

Operator* new_operator_after(const std::string& type, const std::string& name, const Operator* cur);

Operand* new_operand(const torch::jit::Value* v);

Operand* new_operand(const std::string& name);
Expand Down
3 changes: 3 additions & 0 deletions tools/pnnx/src/pass_ncnn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
#include "pass_ncnn/fuse_deconvolutiondepthwise_activation.h"
#include "pass_ncnn/fuse_innerproduct_activation.h"
#include "pass_ncnn/fuse_transpose_matmul.h"
#include "pass_ncnn/insert_reshape_pooling.h"

#include "pass_level4/dead_code_elimination.h"
#include "pass_level4/canonicalize.h"
Expand Down Expand Up @@ -73,6 +74,8 @@ void pass_ncnn(Graph& g)

ncnn::chain_multi_output(g);

ncnn::insert_reshape_pooling(g);

ncnn::solve_batch_index(g);

ncnn::convert_half_to_float(g);
Expand Down
106 changes: 106 additions & 0 deletions tools/pnnx/src/pass_ncnn/insert_reshape_pooling.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "insert_reshape_pooling.h"
#include "pass_ncnn.h"

namespace pnnx {

namespace ncnn {

// Insert Tensor.reshape operators around pooling layers whose input has no
// batch dimension. ncnn assumes a leading batch axis, so the first reshape
// prepends a fake batch of 1 before the pooling and the second restores the
// original batch-free output shape afterwards:
//   nn.MaxPool1d 2d-3d-2d
//   nn.MaxPool2d 3d-4d-3d
//   nn.MaxPool3d 4d-5d-4d
void insert_reshape_pooling(Graph& graph)
{
    while (1)
    {
        bool matched = false;

        for (size_t i = 0; i < graph.ops.size(); i++)
        {
            Operator* op = graph.ops[i];

            if (op->type != "nn.MaxPool1d" && op->type != "nn.MaxPool2d" && op->type != "nn.MaxPool3d")
                continue;

            const int input_rank = (int)op->inputs[0]->shape.size();
            if (input_rank == 0)
                continue; // shape unknown, cannot tell whether the batch dim is missing

            const bool insert_reshape = (op->type == "nn.MaxPool1d" && input_rank == 2)
                                        || (op->type == "nn.MaxPool2d" && input_rank == 3)
                                        || (op->type == "nn.MaxPool3d" && input_rank == 4);

            if (!insert_reshape)
                continue;

            matched = true;

            // log only when a reshape pair is actually inserted
            // (previously this printed for every pooling op with a known shape)
            fprintf(stderr, "insert_reshape_pooling %d\n", input_rank);

            Operand* pooling_in = op->inputs[0];
            Operand* pooling_out = op->outputs[0];

            // build reshape0 -> pooling -> reshape1
            Operator* reshape0 = graph.new_operator_before("Tensor.reshape", op->name + "_ncnnreshape0", op);
            Operator* reshape1 = graph.new_operator_after("Tensor.reshape", op->name + "_ncnnreshape1", op);

            Operand* reshape0_out = graph.new_operand(op->name + "_ncnnreshape0_out");
            Operand* reshape1_in = graph.new_operand(op->name + "_ncnnreshape1_in");

            reshape0->inputs.push_back(pooling_in);
            reshape0->outputs.push_back(reshape0_out);
            reshape1->inputs.push_back(reshape1_in);
            reshape1->outputs.push_back(pooling_out);

            // pooling_in now feeds reshape0 instead of the pooling op
            for (size_t j = 0; j < pooling_in->consumers.size(); j++)
            {
                if (pooling_in->consumers[j] == op)
                {
                    pooling_in->consumers[j] = reshape0;
                    break;
                }
            }
            pooling_out->producer = reshape1;

            // rewire the pooling op to the new intermediate operands
            op->inputs[0] = reshape0_out;
            op->outputs[0] = reshape1_in;

            reshape0_out->producer = reshape0;
            reshape0_out->consumers.push_back(op);
            reshape1_in->producer = op;
            reshape1_in->consumers.push_back(reshape1);

            // reshape0 prepends a fake batch dimension of size 1;
            // reshape1 restores the original batch-free output shape
            std::vector<int> reshape0_shape = pooling_in->shape;
            reshape0_shape.insert(reshape0_shape.begin(), 1);
            std::vector<int> reshape1_shape = pooling_out->shape;

            reshape0->params["shape"] = reshape0_shape;
            reshape1->params["shape"] = reshape1_shape;

            // the operator list was mutated — restart the scan
            break;
        }

        if (!matched)
            break;
    }
}

} // namespace ncnn

} // namespace pnnx
25 changes: 25 additions & 0 deletions tools/pnnx/src/pass_ncnn/insert_reshape_pooling.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef PNNX_PASS_NCNN_INSERT_RESHAPE_POOLING_H
#define PNNX_PASS_NCNN_INSERT_RESHAPE_POOLING_H

#include "pass_ncnn.h"

namespace pnnx {

namespace ncnn {

// Insert Tensor.reshape before/after pooling operators whose input has no
// batch dimension, so ncnn sees a correctly-ranked blob.
void insert_reshape_pooling(Graph& graph);

} // namespace ncnn

} // namespace pnnx

#endif // PNNX_PASS_NCNN_INSERT_RESHAPE_POOLING_H
17 changes: 15 additions & 2 deletions tools/pnnx/tests/ncnn/test_F_avg_pool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,24 @@ def __init__(self):
super(Model, self).__init__()

def forward(self, x):
y = x.reshape(12, 128, 127)

x = F.avg_pool2d(x, kernel_size=3)
x = F.avg_pool2d(x, kernel_size=4, stride=2, padding=2)
x = F.avg_pool2d(x, kernel_size=(1,3), stride=1, padding=(0,1), ceil_mode=False, count_include_pad=True)
x = F.avg_pool2d(x, kernel_size=(4,5), stride=(1,2), padding=(1,2), ceil_mode=True, count_include_pad=False)
x = F.avg_pool2d(x, kernel_size=(5,3), stride=(2,1), padding=1, ceil_mode=False, count_include_pad=True)
x = F.avg_pool2d(x, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
#x = F.avg_pool2d(x, kernel_size=(5,4), stride=1, padding=2, ceil_mode=False, count_include_pad=False, divisor_override=18)
return x

y = F.avg_pool2d(y, kernel_size=3)
y = F.avg_pool2d(y, kernel_size=4, stride=2, padding=2)
y = F.avg_pool2d(y, kernel_size=(1,3), stride=1, padding=(0,1), ceil_mode=False, count_include_pad=True)
y = F.avg_pool2d(y, kernel_size=(4,5), stride=(1,2), padding=(1,2), ceil_mode=True, count_include_pad=False)
y = F.avg_pool2d(y, kernel_size=(5,3), stride=(2,1), padding=1, ceil_mode=False, count_include_pad=True)
y = F.avg_pool2d(y, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
#y = F.avg_pool2d(y, kernel_size=(5,4), stride=1, padding=2, ceil_mode=False, count_include_pad=False, divisor_override=18)
return x, y

def test():
net = Model()
Expand All @@ -51,7 +61,10 @@ def test():
import test_F_avg_pool2d_ncnn
b = test_F_avg_pool2d_ncnn.test_inference()

return torch.allclose(a, b, 1e-4, 1e-4)
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True

if __name__ == "__main__":
if test():
Expand Down
17 changes: 15 additions & 2 deletions tools/pnnx/tests/ncnn/test_F_avg_pool3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,24 @@ def __init__(self):
super(Model, self).__init__()

def forward(self, x):
y = x.reshape(12, 96, 128, 128)

x = F.avg_pool3d(x, kernel_size=3)
x = F.avg_pool3d(x, kernel_size=4, stride=2, padding=2)
x = F.avg_pool3d(x, kernel_size=(1,2,3), stride=1, padding=(0,1,1), ceil_mode=False, count_include_pad=True)
x = F.avg_pool3d(x, kernel_size=(3,4,5), stride=(1,2,2), padding=(1,1,2), ceil_mode=True, count_include_pad=False)
x = F.avg_pool3d(x, kernel_size=(5,4,3), stride=(2,1,1), padding=1, ceil_mode=False, count_include_pad=True)
x = F.avg_pool3d(x, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
#x = F.avg_pool3d(x, kernel_size=(5,4,4), stride=1, padding=2, ceil_mode=False, count_include_pad=False, divisor_override=77)
return x

y = F.avg_pool3d(y, kernel_size=3)
y = F.avg_pool3d(y, kernel_size=4, stride=2, padding=2)
y = F.avg_pool3d(y, kernel_size=(1,2,3), stride=1, padding=(0,1,1), ceil_mode=False, count_include_pad=True)
y = F.avg_pool3d(y, kernel_size=(3,4,5), stride=(1,2,2), padding=(1,1,2), ceil_mode=True, count_include_pad=False)
y = F.avg_pool3d(y, kernel_size=(5,4,3), stride=(2,1,1), padding=1, ceil_mode=False, count_include_pad=True)
y = F.avg_pool3d(y, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
#y = F.avg_pool3d(y, kernel_size=(5,4,4), stride=1, padding=2, ceil_mode=False, count_include_pad=False, divisor_override=77)
return x, y

def test():
net = Model()
Expand All @@ -51,7 +61,10 @@ def test():
import test_F_avg_pool3d_ncnn
b = test_F_avg_pool3d_ncnn.test_inference()

return torch.allclose(a, b, 1e-4, 1e-4)
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True

if __name__ == "__main__":
if test():
Expand Down
16 changes: 14 additions & 2 deletions tools/pnnx/tests/ncnn/test_F_max_pool1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,22 @@ def __init__(self):
super(Model, self).__init__()

def forward(self, x):
y = x.reshape(12, 128)

x = F.max_pool1d(x, kernel_size=3)
x = F.max_pool1d(x, kernel_size=4, stride=2, padding=2, dilation=1)
x = F.max_pool1d(x, kernel_size=3, stride=1, padding=1, dilation=1, return_indices=False, ceil_mode=False)
x = F.max_pool1d(x, kernel_size=5, stride=2, padding=2, dilation=1, return_indices=False, ceil_mode=True)
x = F.max_pool1d(x, kernel_size=3, stride=1, padding=1, dilation=1, return_indices=False, ceil_mode=False)
x = F.max_pool1d(x, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x

y = F.max_pool1d(y, kernel_size=3)
y = F.max_pool1d(y, kernel_size=4, stride=2, padding=2, dilation=1)
y = F.max_pool1d(y, kernel_size=3, stride=1, padding=1, dilation=1, return_indices=False, ceil_mode=False)
y = F.max_pool1d(y, kernel_size=5, stride=2, padding=2, dilation=1, return_indices=False, ceil_mode=True)
y = F.max_pool1d(y, kernel_size=3, stride=1, padding=1, dilation=1, return_indices=False, ceil_mode=False)
y = F.max_pool1d(y, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x, y
#x, indices1 = F.max_pool1d(x, kernel_size=2, padding=1, dilation=1, return_indices=True, ceil_mode=False)
#x, indices2 = F.max_pool1d(x, kernel_size=5, stride=1, padding=2, dilation=1, return_indices=True, ceil_mode=True)
#return x, indices1, indices2
Expand All @@ -53,7 +62,10 @@ def test():
import test_F_max_pool1d_ncnn
b = test_F_max_pool1d_ncnn.test_inference()

return torch.allclose(a, b, 1e-4, 1e-4)
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True

if __name__ == "__main__":
if test():
Expand Down
16 changes: 14 additions & 2 deletions tools/pnnx/tests/ncnn/test_F_max_pool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,22 @@ def __init__(self):
super(Model, self).__init__()

def forward(self, x):
y = x.reshape(12, 128, 127)

x = F.max_pool2d(x, kernel_size=3)
x = F.max_pool2d(x, kernel_size=4, stride=2, padding=2, dilation=1)
x = F.max_pool2d(x, kernel_size=(1,3), stride=1, padding=(0,1), dilation=1, return_indices=False, ceil_mode=False)
x = F.max_pool2d(x, kernel_size=(4,5), stride=(1,2), padding=(1,2), dilation=1, return_indices=False, ceil_mode=True)
x = F.max_pool2d(x, kernel_size=(2,3), stride=1, padding=1, dilation=(1,1), return_indices=False, ceil_mode=False)
x = F.max_pool2d(x, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x

y = F.max_pool2d(y, kernel_size=3)
y = F.max_pool2d(y, kernel_size=4, stride=2, padding=2, dilation=1)
y = F.max_pool2d(y, kernel_size=(1,3), stride=1, padding=(0,1), dilation=1, return_indices=False, ceil_mode=False)
y = F.max_pool2d(y, kernel_size=(4,5), stride=(1,2), padding=(1,2), dilation=1, return_indices=False, ceil_mode=True)
y = F.max_pool2d(y, kernel_size=(2,3), stride=1, padding=1, dilation=(1,1), return_indices=False, ceil_mode=False)
y = F.max_pool2d(y, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x, y
#x, indices1 = F.max_pool2d(x, kernel_size=2, padding=1, dilation=1, return_indices=True, ceil_mode=False)
#x, indices2 = F.max_pool2d(x, kernel_size=(5,4), stride=1, padding=2, dilation=1, return_indices=True, ceil_mode=False)
#return x, indices1, indices2
Expand All @@ -53,7 +62,10 @@ def test():
import test_F_max_pool2d_ncnn
b = test_F_max_pool2d_ncnn.test_inference()

return torch.allclose(a, b, 1e-4, 1e-4)
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True

if __name__ == "__main__":
if test():
Expand Down
16 changes: 14 additions & 2 deletions tools/pnnx/tests/ncnn/test_F_max_pool3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,22 @@ def __init__(self):
super(Model, self).__init__()

def forward(self, x):
y = x.reshape(12, 96, 128, 128)

x = F.max_pool3d(x, kernel_size=3)
x = F.max_pool3d(x, kernel_size=4, stride=2, padding=2, dilation=1)
x = F.max_pool3d(x, kernel_size=(1,2,3), stride=1, padding=(0,0,1), dilation=1, return_indices=False, ceil_mode=False)
x = F.max_pool3d(x, kernel_size=(3,4,5), stride=(1,2,2), padding=(1,2,2), dilation=1, return_indices=False, ceil_mode=True)
x = F.max_pool3d(x, kernel_size=(2,3,3), stride=1, padding=1, dilation=(1,1,1), return_indices=False, ceil_mode=False)
x = F.max_pool3d(x, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x

y = F.max_pool3d(y, kernel_size=3)
y = F.max_pool3d(y, kernel_size=4, stride=2, padding=2, dilation=1)
y = F.max_pool3d(y, kernel_size=(1,2,3), stride=1, padding=(0,0,1), dilation=1, return_indices=False, ceil_mode=False)
y = F.max_pool3d(y, kernel_size=(3,4,5), stride=(1,2,2), padding=(1,2,2), dilation=1, return_indices=False, ceil_mode=True)
y = F.max_pool3d(y, kernel_size=(2,3,3), stride=1, padding=1, dilation=(1,1,1), return_indices=False, ceil_mode=False)
y = F.max_pool3d(y, kernel_size=2, stride=1, padding=0, dilation=1, return_indices=False, ceil_mode=True)
return x, y
#x, indices = F.max_pool3d(x, kernel_size=(5,4,4), stride=1, padding=2, dilation=1, return_indices=True, ceil_mode=False)
#return x, indices

Expand All @@ -52,7 +61,10 @@ def test():
import test_F_max_pool3d_ncnn
b = test_F_max_pool3d_ncnn.test_inference()

return torch.allclose(a, b, 1e-4, 1e-4)
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True

if __name__ == "__main__":
if test():
Expand Down
Loading

0 comments on commit 6b2495c

Please sign in to comment.