Mirror of https://github.com/PaddlePaddle/PaddleOCR.git
	Merge pull request #4551 from WenmuZhou/copyright
add refer for some code
commit 5aa14c5f11
@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/iaa_augment.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

@@ -1,4 +1,20 @@
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/make_border_map.py
"""

from __future__ import absolute_import
from __future__ import division

@@ -1,4 +1,16 @@
# -*- coding:utf-8 -*-
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
@@ -12,12 +24,8 @@ from shapely.geometry import Polygon

__all__ = ['MakePseGt']

class MakePseGt(object):
    r'''
    Making binary mask from detection data with ICDAR format.
    Typically following the process of class `MakeICDARData`.
    '''

class MakePseGt(object):
    def __init__(self, kernel_num=7, size=640, min_shrink_ratio=0.4, **kwargs):
        self.kernel_num = kernel_num
        self.min_shrink_ratio = min_shrink_ratio
@@ -38,16 +46,20 @@ class MakePseGt(object):
            text_polys *= scale

        gt_kernels = []
        for i in range(1,self.kernel_num+1):
        for i in range(1, self.kernel_num + 1):
            # s1->sn, from big to small
            rate = 1.0 - (1.0 - self.min_shrink_ratio) / (self.kernel_num - 1) * i
            text_kernel, ignore_tags = self.generate_kernel(image.shape[0:2], rate, text_polys, ignore_tags)
            rate = 1.0 - (1.0 - self.min_shrink_ratio) / (self.kernel_num - 1
                                                          ) * i
            text_kernel, ignore_tags = self.generate_kernel(
                image.shape[0:2], rate, text_polys, ignore_tags)
            gt_kernels.append(text_kernel)

        training_mask = np.ones(image.shape[0:2], dtype='uint8')
        for i in range(text_polys.shape[0]):
            if ignore_tags[i]:
                cv2.fillPoly(training_mask, text_polys[i].astype(np.int32)[np.newaxis, :, :], 0)
                cv2.fillPoly(training_mask,
                             text_polys[i].astype(np.int32)[np.newaxis, :, :],
                             0)

        gt_kernels = np.array(gt_kernels)
        gt_kernels[gt_kernels > 0] = 1
@@ -59,16 +71,25 @@ class MakePseGt(object):
        data['mask'] = training_mask.astype('float32')
        return data

    def generate_kernel(self, img_size, shrink_ratio, text_polys, ignore_tags=None):
    def generate_kernel(self,
                        img_size,
                        shrink_ratio,
                        text_polys,
                        ignore_tags=None):
        """
        Refer to part of the code:
        https://github.com/open-mmlab/mmocr/blob/main/mmocr/datasets/pipelines/textdet_targets/base_textdet_targets.py
        """

        h, w = img_size
        text_kernel = np.zeros((h, w), dtype=np.float32)
        for i, poly in enumerate(text_polys):
            polygon = Polygon(poly)
            distance = polygon.area * (1 - shrink_ratio * shrink_ratio) / (polygon.length + 1e-6)
            distance = polygon.area * (1 - shrink_ratio * shrink_ratio) / (
                polygon.length + 1e-6)
            subject = [tuple(l) for l in poly]
            pco = pyclipper.PyclipperOffset()
            pco.AddPath(subject, pyclipper.JT_ROUND,
                        pyclipper.ET_CLOSEDPOLYGON)
            pco.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
            shrinked = np.array(pco.Execute(-distance))

            if len(shrinked) == 0 or shrinked.size == 0:

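Editor's note: the loop and generate_kernel changes above implement the PSE kernel shrinking rule, where each kernel offsets the polygon inward by distance = area * (1 - shrink_ratio * shrink_ratio) / perimeter and clips it with pyclipper. Below is a minimal, self-contained sketch of that step, not part of the diff; the rectangle and shrink ratio are made-up example values.

```python
import pyclipper
from shapely.geometry import Polygon

# A made-up text quadrilateral and one shrink ratio (one of the rates s1..sn).
poly = [(0, 0), (100, 0), (100, 30), (0, 30)]
shrink_ratio = 0.7

polygon = Polygon(poly)
# Offset distance as in generate_kernel: area * (1 - r^2) / perimeter.
distance = polygon.area * (1 - shrink_ratio * shrink_ratio) / (
    polygon.length + 1e-6)

pco = pyclipper.PyclipperOffset()
pco.AddPath(poly, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked = pco.Execute(-distance)  # negative offset moves the edges inward
print(shrinked)  # an empty list means the polygon collapsed at this ratio
```

An empty result is exactly the case the `if len(shrinked) == 0 or shrinked.size == 0:` guard above handles.
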
@@ -1,4 +1,20 @@
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/make_shrink_map.py
"""

from __future__ import absolute_import
from __future__ import division

@@ -1,4 +1,20 @@
# -*- coding:utf-8 -*-
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/random_crop_data.py
"""

from __future__ import absolute_import
from __future__ import division

@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/RubanSeven/Text-Image-Augmentation-python/blob/master/augment.py
"""

import numpy as np
from .warp_mls import WarpMLS

@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/RubanSeven/Text-Image-Augmentation-python/blob/master/warp_mls.py
"""

import numpy as np

@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py
"""

import paddle
from paddle import nn

@@ -1,4 +1,4 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,22 +11,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py
"""

from paddle import nn


class PSEHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 hidden_dim=256,
                 out_channels=7,
                 **kwargs):
    def __init__(self, in_channels, hidden_dim=256, out_channels=7, **kwargs):
        super(PSEHead, self).__init__()
        self.conv1 = nn.Conv2D(in_channels, hidden_dim, kernel_size=3, stride=1, padding=1)
        self.conv1 = nn.Conv2D(
            in_channels, hidden_dim, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2D(hidden_dim)
        self.relu1 = nn.ReLU()

        self.conv2 = nn.Conv2D(hidden_dim, out_channels, kernel_size=1, stride=1, padding=0)

        self.conv2 = nn.Conv2D(
            hidden_dim, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x, **kwargs):
        out = self.conv1(x)

@@ -11,16 +11,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/neck/fpn.py
"""

import paddle.nn as nn
import paddle
import math
import paddle.nn.functional as F


class Conv_BN_ReLU(nn.Layer):
    def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, padding=0):
    def __init__(self,
                 in_planes,
                 out_planes,
                 kernel_size=1,
                 stride=1,
                 padding=0):
        super(Conv_BN_ReLU, self).__init__()
        self.conv = nn.Conv2D(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
        self.conv = nn.Conv2D(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=False)
        self.bn = nn.BatchNorm2D(out_planes, momentum=0.1)
        self.relu = nn.ReLU()
@@ -28,46 +43,69 @@ class Conv_BN_ReLU(nn.Layer):
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
                m.weight = paddle.create_parameter(shape=m.weight.shape, dtype='float32', default_initializer=paddle.nn.initializer.Normal(0, math.sqrt(2. / n)))
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Normal(
                        0, math.sqrt(2. / n)))
            elif isinstance(m, nn.BatchNorm2D):
                m.weight = paddle.create_parameter(shape=m.weight.shape, dtype='float32', default_initializer=paddle.nn.initializer.Constant(1.0))
                m.bias = paddle.create_parameter(shape=m.bias.shape, dtype='float32', default_initializer=paddle.nn.initializer.Constant(0.0))
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(1.0))
                m.bias = paddle.create_parameter(
                    shape=m.bias.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(0.0))

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))


class FPN(nn.Layer):
    def __init__(self, in_channels, out_channels):
        super(FPN, self).__init__()

        # Top layer
        self.toplayer_ = Conv_BN_ReLU(in_channels[3], out_channels, kernel_size=1, stride=1, padding=0)
        self.toplayer_ = Conv_BN_ReLU(
            in_channels[3], out_channels, kernel_size=1, stride=1, padding=0)
        # Lateral layers
        self.latlayer1_ = Conv_BN_ReLU(in_channels[2], out_channels, kernel_size=1, stride=1, padding=0)
        self.latlayer1_ = Conv_BN_ReLU(
            in_channels[2], out_channels, kernel_size=1, stride=1, padding=0)

        self.latlayer2_ = Conv_BN_ReLU(in_channels[1], out_channels, kernel_size=1, stride=1, padding=0)
        self.latlayer2_ = Conv_BN_ReLU(
            in_channels[1], out_channels, kernel_size=1, stride=1, padding=0)

        self.latlayer3_ = Conv_BN_ReLU(in_channels[0], out_channels, kernel_size=1, stride=1, padding=0)
        self.latlayer3_ = Conv_BN_ReLU(
            in_channels[0], out_channels, kernel_size=1, stride=1, padding=0)

        # Smooth layers
        self.smooth1_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.smooth1_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)

        self.smooth2_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)

        self.smooth3_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.smooth2_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)

        self.smooth3_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)

        self.out_channels = out_channels * 4
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
                m.weight = paddle.create_parameter(shape=m.weight.shape, dtype='float32',
                                                   default_initializer=paddle.nn.initializer.Normal(0,
                                                                                                    math.sqrt(2. / n)))
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Normal(
                        0, math.sqrt(2. / n)))
            elif isinstance(m, nn.BatchNorm2D):
                m.weight = paddle.create_parameter(shape=m.weight.shape, dtype='float32',
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(1.0))
                m.bias = paddle.create_parameter(shape=m.bias.shape, dtype='float32',
                m.bias = paddle.create_parameter(
                    shape=m.bias.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(0.0))

    def _upsample(self, x, scale=1):
@@ -81,15 +119,15 @@ class FPN(nn.Layer):
        p5 = self.toplayer_(f5)

        f4 = self.latlayer1_(f4)
        p4 = self._upsample_add(p5, f4,2)
        p4 = self._upsample_add(p5, f4, 2)
        p4 = self.smooth1_(p4)

        f3 = self.latlayer2_(f3)
        p3 = self._upsample_add(p4, f3,2)
        p3 = self._upsample_add(p4, f3, 2)
        p3 = self.smooth2_(p3)

        f2 = self.latlayer3_(f2)
        p2 = self._upsample_add(p3, f2,2)
        p2 = self._upsample_add(p3, f2, 2)
        p2 = self.smooth3_(p2)

        p3 = self._upsample(p3, 2)

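Editor's note: the forward pass above builds the FPN top-down path with repeated _upsample_add calls; the body of _upsample is cut off by this hunk, so the sketch below assumes a plain interpolate-then-add merge, which is what the call sites suggest. Shapes and values are made up; this is not part of the diff.

```python
import paddle
import paddle.nn.functional as F

# Made-up feature maps: p5 is the coarser top map, f4 the lateral map at 2x size.
p5 = paddle.rand([1, 256, 20, 20])
f4 = paddle.rand([1, 256, 40, 40])

# Assumed equivalent of _upsample_add(p5, f4, 2): upsample by 2, then add.
p4 = F.interpolate(p5, scale_factor=2, mode='nearest') + f4
print(p4.shape)  # [1, 256, 40, 40]
```
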
@@ -1,5 +1,6 @@
## 编译
code from https://github.com/whai362/pan_pp.pytorch
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/post_processing/pse
```python
python3 setup.py build_ext --inplace
```

@@ -1,4 +1,4 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py
"""

from __future__ import absolute_import
from __future__ import division
@@ -47,7 +51,8 @@ class PSEPostProcess(object):
        pred = outs_dict['maps']
        if not isinstance(pred, paddle.Tensor):
            pred = paddle.to_tensor(pred)
        pred = F.interpolate(pred, scale_factor=4 // self.scale, mode='bilinear')
        pred = F.interpolate(
            pred, scale_factor=4 // self.scale, mode='bilinear')

        score = F.sigmoid(pred[:, 0, :, :])

@@ -60,7 +65,9 @@ class PSEPostProcess(object):

        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            boxes, scores = self.boxes_from_bitmap(score[batch_index], kernels[batch_index], shape_list[batch_index])
            boxes, scores = self.boxes_from_bitmap(score[batch_index],
                                                   kernels[batch_index],
                                                   shape_list[batch_index])

            boxes_batch.append({'points': boxes, 'scores': scores})
        return boxes_batch
@@ -98,15 +105,14 @@ class PSEPostProcess(object):
                mask = np.zeros((box_height, box_width), np.uint8)
                mask[points[:, 1], points[:, 0]] = 255

                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
                bbox = np.squeeze(contours[0], 1)
            else:
                raise NotImplementedError

            bbox[:, 0] = np.clip(
                np.round(bbox[:, 0] / ratio_w), 0, src_w)
            bbox[:, 1] = np.clip(
                np.round(bbox[:, 1] / ratio_h), 0, src_h)
            bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)
            bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)
            boxes.append(bbox)
            scores.append(score_i)
        return boxes, scores

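Editor's note: the final lines of boxes_from_bitmap above rescale contour points from the network output back to the source image by dividing by the resize ratios and clipping to the image bounds. A tiny standalone illustration, not part of the diff, with made-up ratios and image size:

```python
import numpy as np

src_h, src_w = 720, 1280     # made-up source image size
ratio_h, ratio_w = 0.5, 0.5  # made-up resize ratios applied before inference

bbox = np.array([[100, 40], [300, 40], [300, 120], [100, 120]], dtype=np.float32)
bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)
bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)
print(bbox)  # points now lie in source-image coordinates
```
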
@@ -1,4 +1,4 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,18 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/loss/iou.py
"""

import paddle

EPS = 1e-6


def iou_single(a, b, mask, n_class):
    valid = mask == 1
    a = a.masked_select(valid)
    b = b.masked_select(valid)
    miou = []
    for i in range(n_class):
        if a.shape == [0] and a.shape==b.shape:
        if a.shape == [0] and a.shape == b.shape:
            inter = paddle.to_tensor(0.0)
            union = paddle.to_tensor(0.0)
        else:
@@ -32,6 +37,7 @@ def iou_single(a, b, mask, n_class):
    miou = sum(miou) / len(miou)
    return miou


def iou(a, b, mask, n_class=2, reduce=True):
    batch_size = a.shape[0]

@@ -39,7 +45,7 @@ def iou(a, b, mask, n_class=2, reduce=True):
    b = b.reshape([batch_size, -1])
    mask = mask.reshape([batch_size, -1])

    iou = paddle.zeros((batch_size,), dtype='float32')
    iou = paddle.zeros((batch_size, ), dtype='float32')
    for i in range(batch_size):
        iou[i] = iou_single(a[i], b[i], mask[i], n_class)

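Editor's note: iou_single above keeps only the positions where mask == 1 and averages a per-class IoU; the actual inter/union lines fall outside this hunk, so the numpy sketch below shows the standard computation with the same EPS smoothing, as an illustration rather than the file's exact code.

```python
import numpy as np

EPS = 1e-6
a = np.array([0, 1, 1, 0, 1])     # made-up predicted labels
b = np.array([0, 1, 0, 0, 1])     # made-up ground-truth labels
mask = np.array([1, 1, 1, 1, 0])  # only positions with mask == 1 are scored

valid = mask == 1
a, b = a[valid], b[valid]
miou = []
for i in range(2):                # n_class = 2
    inter = np.sum((a == i) & (b == i))
    union = np.sum((a == i) | (b == i))
    miou.append((inter + EPS) / (union + EPS))
print(sum(miou) / len(miou))      # mean IoU over the two classes
```
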
@@ -1,4 +1,4 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/PytorchOCR/blob/master/torchocr/utils/logging.py
"""

import os
import sys