Mirror of https://github.com/PaddlePaddle/PaddleOCR.git, synced 2025-06-26 21:24:27 +00:00
Fix typos (#14936)
This commit is contained in:
parent d28cb46061, commit 33fa33e4a9
@@ -64,7 +64,7 @@ x1, y1, x2, y2, x3, y3, x4, y4, annotation
 1. config the `dataset['train']['dataset'['data_path']'`,`dataset['validate']['dataset'['data_path']`in [config/icdar2015_resnet18_fpn_DBhead_polyLR.yaml](cconfig/icdar2015_resnet18_fpn_DBhead_polyLR.yaml)
 * . single gpu train
 ```bash
-bash singlel_gpu_train.sh
+bash single_gpu_train.sh
 ```
 * . Multi-gpu training
 ```bash
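The hunk above documents editing the dataset `data_path` entries in the DBNet config before launching training. A minimal sketch (not part of this commit) for double-checking those entries, assuming the YAML nests exactly as the bracket path in that doc line suggests:

```python
# Minimal sketch, assuming the config nests as dataset -> train/validate -> dataset -> data_path,
# mirroring the bracket path quoted in the doc line above. Not part of this commit.
import yaml  # PyYAML

with open("config/icdar2015_resnet18_fpn_DBhead_polyLR.yaml") as f:
    cfg = yaml.safe_load(f)

for split in ("train", "validate"):
    # Print the data_path entries the step above asks you to edit.
    print(split, cfg["dataset"][split]["dataset"]["data_path"])
```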
@@ -4,5 +4,5 @@
 from .util import *
 from .metrics import *
 from .schedulers import *
-from .cal_recall.script import cal_recall_precison_f1
+from .cal_recall.script import cal_recall_precision_f1
 from .ocr_metric import get_metric
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # @Time : 1/16/19 6:40 AM
 # @Author : zhoujun
-from .script import cal_recall_precison_f1
+from .script import cal_recall_precision_f1

-__all__ = ["cal_recall_precison_f1"]
+__all__ = ["cal_recall_precision_f1"]
@@ -394,7 +394,7 @@ def evaluate_method(gtFilePath, submFilePath, evaluationParams):
     return resDict


-def cal_recall_precison_f1(gt_path, result_path, show_result=False):
+def cal_recall_precision_f1(gt_path, result_path, show_result=False):
     p = {"g": gt_path, "s": result_path}
     result = rrc_evaluation_funcs.main_evaluation(
         p, default_evaluation_params, validate_data, evaluate_method, show_result
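For context, a hedged usage sketch of the renamed helper; the top-level package prefix and the exact fields of the returned dict are assumptions, not something this diff shows:

```python
# Hedged sketch: the package name ("utils") and the result fields are assumptions.
from utils.cal_recall.script import cal_recall_precision_f1

metrics = cal_recall_precision_f1(
    gt_path="path/to/gt",           # directory of ICDAR-style ground-truth files
    result_path="path/to/results",  # directory of detection results
    show_result=False,
)
print(metrics)  # expected to carry recall / precision / f1-style fields
```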
@@ -66,7 +66,7 @@ There are four buttons, a drop-down list, and a menu button on the app home page; they

 <img src="https://paddleocr.bj.bcebos.com/PP-OCRv2/lite/imgs/run_det_cls_rec.jpg" width="400">

-After the model finishes running, the `STATUS` field in the model and run-status display area shows the current model state; `run model successed` here indicates that the model ran successfully.
+After the model finishes running, the `STATUS` field in the model and run-status display area shows the current model state; `run model succeeded` here indicates that the model ran successfully.

 The model's output is shown in the result display area, in the following format:
 ```text
@@ -256,7 +256,7 @@ public class MainActivity extends AppCompatActivity {
         // Load test image from path and run model
         tvInputSetting.setText("Model: " + modelPath.substring(modelPath.lastIndexOf("/") + 1) + "\nOPENCL: " + cbOpencl.isChecked() + "\nCPU Thread Num: " + cpuThreadNum + "\nCPU Power Mode: " + cpuPowerMode);
         tvInputSetting.scrollTo(0, 0);
-        tvStatus.setText("STATUS: load model successed");
+        tvStatus.setText("STATUS: load model succeeded");

    }

@@ -265,7 +265,7 @@ public class MainActivity extends AppCompatActivity {
     }

     public void onRunModelSuccessed() {
-        tvStatus.setText("STATUS: run model successed");
+        tvStatus.setText("STATUS: run model succeeded");
         // Obtain results and update UI
         tvInferenceTime.setText("Inference time: " + predictor.inferenceTime() + " ms");
         Bitmap outputImage = predictor.outputImage();
@@ -314,7 +314,7 @@ class MakeShrinkMap(object):
             padding = pyclipper.PyclipperOffset()
             padding.AddPath(subject, pyclipper.JT_ROUND,
                             pyclipper.ET_CLOSEDPOLYGON)
-            shrinked = []
+            shrunk = []

             # Increase the shrink ratio every time we get multiple polygon returned back
             possible_ratios = np.arange(self.shrink_ratio, 1,
@@ -323,19 +323,19 @@ class MakeShrinkMap(object):
             for ratio in possible_ratios:
                 distance = polygon_shape.area * (
                     1 - np.power(ratio, 2)) / polygon_shape.length
-                shrinked = padding.Execute(-distance)
-                if len(shrinked) == 1:
+                shrunk = padding.Execute(-distance)
+                if len(shrunk) == 1:
                     break

-            if shrinked == []:
+            if shrunk == []:
                 cv2.fillPoly(mask,
                              polygon.astype(np.int32)[np.newaxis, :, :], 0)
                 ignore_tags[i] = True
                 continue

-            for each_shirnk in shrinked:
-                shirnk = np.array(each_shirnk).reshape(-1, 2)
-                cv2.fillPoly(gt, [shirnk.astype(np.int32)], 1)
+            for each_shrink in shrunk:
+                shrink = np.array(each_shrink).reshape(-1, 2)
+                cv2.fillPoly(gt, [shrink.astype(np.int32)], 1)
             if self.num_classes > 1:  # draw the classification mask
                 cv2.fillPoly(gt_class, polygon.astype(np.int32)[np.newaxis, :, :], classes[i])

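The shrink logic above offsets each text polygon inward by D = A(1 - r²) / L before rasterizing it into the ground-truth map. A standalone sketch of just that step (illustrative only, not the module's actual code; assumes `numpy`, `pyclipper`, and `shapely` are installed):

```python
# Illustrative sketch of the inward-offset step used by MakeShrinkMap.
import numpy as np
import pyclipper
from shapely.geometry import Polygon

def shrink_polygon(polygon: np.ndarray, ratio: float = 0.4):
    """Offset a polygon inward by D = area * (1 - ratio**2) / perimeter."""
    poly = Polygon(polygon)
    distance = poly.area * (1 - np.power(ratio, 2)) / poly.length
    offset = pyclipper.PyclipperOffset()
    offset.AddPath(polygon.astype(int).tolist(), pyclipper.JT_ROUND,
                   pyclipper.ET_CLOSEDPOLYGON)
    shrunk = offset.Execute(-distance)  # may return 0, 1, or several rings
    return [np.array(ring).reshape(-1, 2) for ring in shrunk]

square = np.array([[0, 0], [100, 0], [100, 100], [0, 100]])
print(shrink_polygon(square))  # a single, smaller square for this convex input
```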
@@ -64,7 +64,7 @@ When you tap **Run Model**, the demo executes the corresponding model(s) in your

 <img src="./images/run_det_cls_rec.jpg" width="400">

-The status display area shows the current model status (e.g., `run model successed`), indicating that the model ran successfully. The recognition results are formatted as follows:
+The status display area shows the current model status (e.g., `run model succeeded`), indicating that the model ran successfully. The recognition results are formatted as follows:

 ```text
 Serial Number: Det: (x1,y1)(x2,y2)(x3,y3)(x4,y4) Rec: Recognized Text, Confidence Score Cls: Classification Label, Classification Score
@@ -63,7 +63,7 @@ There are four buttons, a drop-down list, and a menu button on the app home page; they

 <img src="./images/run_det_cls_rec.jpg" width="400">

-After the model finishes running, the `STATUS` field in the model and run-status display area shows the current model state; `run model successed` here indicates that the model ran successfully.
+After the model finishes running, the `STATUS` field in the model and run-status display area shows the current model state; `run model succeeded` here indicates that the model ran successfully.

 The model's output is shown in the result display area, in the following format:
@@ -2,7 +2,7 @@

 The All-in-One development tool [PaddleX](https://github.com/PaddlePaddle/PaddleX/tree/release/3.0-beta1), based on the advanced technology of PaddleOCR, supports **low-code full-process** development capabilities in the OCR field. Through low-code development, simple and efficient model use, combination, and customization can be achieved. This will significantly **reduce the time consumption** of model development, **lower its development difficulty**, and greatly accelerate the application and promotion speed of models in the industry. Features include:

-* 🎨 [**Rich Model One-Click Call**](https://paddlepaddle.github.io/PaddleOCR/latest/en/paddlex/quick_start.html): Integrates **48 models** related to text image intelligent analysis, general OCR, general layout parsing, table recognition, formula recognition, and seal recognition into 10 pipelines, which can be quickly experienced through a simple **Python API one-click call**. In addition, the same set of APIs also supports a total of **200+ models** in image classification, object detection, image segmentation, and time series forcasting, forming 30+ single-function modules, making it convenient for developers to use **model combinations**.
+* 🎨 [**Rich Model One-Click Call**](https://paddlepaddle.github.io/PaddleOCR/latest/en/paddlex/quick_start.html): Integrates **48 models** related to text image intelligent analysis, general OCR, general layout parsing, table recognition, formula recognition, and seal recognition into 10 pipelines, which can be quickly experienced through a simple **Python API one-click call**. In addition, the same set of APIs also supports a total of **200+ models** in image classification, object detection, image segmentation, and time series forecasting, forming 30+ single-function modules, making it convenient for developers to use **model combinations**.

 * 🚀 [**High Efficiency and Low barrier of entry**](https://paddlepaddle.github.io/PaddleOCR/latest/en/paddlex/overview.html): Provides two methods based on **unified commands** and **GUI** to achieve simple and efficient use, combination, and customization of models. Supports multiple deployment methods such as **high-performance inference, service-oriented deployment, and edge deployment**. Additionally, for various mainstream hardware such as **NVIDIA GPU, Kunlunxin XPU, Ascend NPU, Cambricon MLU, and Haiguang DCU**, models can be developed with **seamless switching**.

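As a pointer for the "Python API one-click call" mentioned in the bullet above, a hedged sketch along the lines of the linked PaddleX quick start; the pipeline name, image path, and output helpers are assumptions here, so consult the quick-start page for the authoritative API:

```python
# Hedged sketch of a PaddleX pipeline call; names follow the linked quick start
# but are assumptions here, not something this diff verifies.
from paddlex import create_pipeline

pipeline = create_pipeline(pipeline="OCR")          # general OCR pipeline
output = pipeline.predict("path/to/your_image.png")  # hypothetical input image
for res in output:
    res.print()                  # dump recognized text to stdout
    res.save_to_img("./output/")  # save a visualization of the result
```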