Mirror of https://github.com/PaddlePaddle/PaddleOCR.git, synced 2025-11-04 03:39:22 +00:00

Commit 0ea2fb5218
@@ -307,21 +307,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
   return filter_boxes;
 }
 
-std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, std::string power_mode, int num_threads) {
+std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, int num_threads) {
   MobileConfig config;
   config.set_model_from_file(model_file);
 
-  if (power_mode == "LITE_POWER_HIGH"){
-      config.set_power_mode(LITE_POWER_HIGH);
-  } else {
-      if (power_mode == "LITE_POWER_LOW") {
-          config.set_power_mode(LITE_POWER_HIGH);
-      } else {
-          std::cerr << "Only support LITE_POWER_HIGH or LITE_POWER_HIGH." << std::endl;
-          exit(1);
-      }
-  }
-
   config.set_threads(num_threads);
 
   std::shared_ptr<PaddlePredictor> predictor =
@@ -391,7 +380,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "det") == 0) {
       if (argc < 9){
         std::cerr << "[ERROR] usage:" << argv[0]
-                  << " det det_model num_threads batchsize power_mode img_dir det_config lite_benchmark_value" << std::endl;
+                  << " det det_model runtime_device num_threads batchsize img_dir det_config lite_benchmark_value" << std::endl;
         exit(1);
       }
   }
@@ -399,7 +388,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "rec") == 0) {
       if (argc < 9){
         std::cerr << "[ERROR] usage:" << argv[0]
-                  << " rec rec_model num_threads batchsize power_mode img_dir key_txt lite_benchmark_value" << std::endl;
+                  << " rec rec_model runtime_device num_threads batchsize img_dir key_txt lite_benchmark_value" << std::endl;
         exit(1);
       }
   }
@@ -407,7 +396,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "system") == 0) {
       if (argc < 12){
         std::cerr << "[ERROR] usage:" << argv[0]
-                  << " system det_model rec_model clas_model num_threads batchsize power_mode img_dir det_config key_txt lite_benchmark_value" << std::endl;
+                  << " system det_model rec_model clas_model runtime_device num_threads batchsize img_dir det_config key_txt lite_benchmark_value" << std::endl;
         exit(1);
       }
   }
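Taken together, the three usage strings show the reworked CLI: the `power_mode` argument is dropped and `runtime_device` is inserted right after the model path(s). An illustrative before/after invocation for the `det` mode, with concrete values copied from the sample log lines in the docs changed later in this commit; the placeholder names follow the argv assignments in the hunks below (which also carry a `precision` field that the usage strings leave out):

```shell
# Old order: <det_model> <precision> <num_threads> <batchsize> <power_mode> <img_dir> <det_config> <benchmark>
./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW \
    ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True

# New order: <det_model> <runtime_device> <precision> <num_threads> <batchsize> <img_dir> <det_config> <benchmark>
./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 \
    ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True
```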
@@ -417,15 +406,15 @@ void system(char **argv){
   std::string det_model_file = argv[2];
   std::string rec_model_file = argv[3];
   std::string cls_model_file = argv[4];
-  std::string precision = argv[5];
-  std::string num_threads = argv[6];
-  std::string batchsize = argv[7];
-  std::string power_mode = argv[8];
+  std::string runtime_device = argv[5];
+  std::string precision = argv[6];
+  std::string num_threads = argv[7];
+  std::string batchsize = argv[8];
   std::string img_dir = argv[9];
   std::string det_config_path = argv[10];
   std::string dict_path = argv[11];
 
-  if (strcmp(argv[5], "FP32") != 0 && strcmp(argv[5], "INT8") != 0) {
+  if (strcmp(argv[6], "FP32") != 0 && strcmp(argv[6], "INT8") != 0) {
       std::cerr << "Only support FP32 or INT8." << std::endl;
       exit(1);
   }
@@ -441,9 +430,9 @@ void system(char **argv){
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");
 
-  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
-  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
-  auto cls_predictor = loadModel(cls_model_file, power_mode, std::stoi(num_threads));
+  auto det_predictor = loadModel(det_model_file, std::stoi(num_threads));
+  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
+  auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads));
 
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
     std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
@@ -477,14 +466,14 @@ void system(char **argv){
 
 void det(int argc, char **argv) {
   std::string det_model_file = argv[2];
-  std::string precision = argv[3];
-  std::string num_threads = argv[4];
-  std::string batchsize = argv[5];
-  std::string power_mode = argv[6];
+  std::string runtime_device = argv[3];
+  std::string precision = argv[4];
+  std::string num_threads = argv[5];
+  std::string batchsize = argv[6];
   std::string img_dir = argv[7];
   std::string det_config_path = argv[8];
 
-  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+  if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
       std::cerr << "Only support FP32 or INT8." << std::endl;
       exit(1);
   }
@@ -495,7 +484,7 @@ void det(int argc, char **argv) {
   //// load config from txt file
   auto Config = LoadConfigTxt(det_config_path);
 
-  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
+  auto det_predictor = loadModel(det_model_file, std::stoi(num_threads));
 
   std::vector<double> time_info = {0, 0, 0};
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
@@ -530,14 +519,11 @@ void det(int argc, char **argv) {
 
   if (strcmp(argv[9], "True") == 0) {
     AutoLogger autolog(det_model_file, 
-                       0,
-                       0,
-                       0,
+                       runtime_device,
                        std::stoi(num_threads),
                        std::stoi(batchsize), 
                        "dynamic", 
                        precision, 
-                       power_mode,
                        time_info, 
                        cv_all_img_names.size());
     autolog.report();
@@ -546,14 +532,14 @@ void det(int argc, char **argv) {
 
 void rec(int argc, char **argv) {
   std::string rec_model_file = argv[2];
-  std::string precision = argv[3];
-  std::string num_threads = argv[4];
-  std::string batchsize = argv[5];
-  std::string power_mode = argv[6];
+  std::string runtime_device = argv[3];
+  std::string precision = argv[4];
+  std::string num_threads = argv[5];
+  std::string batchsize = argv[6];
   std::string img_dir = argv[7];
   std::string dict_path = argv[8];
 
-  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+  if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
       std::cerr << "Only support FP32 or INT8." << std::endl;
       exit(1);
   }
@@ -565,7 +551,7 @@ void rec(int argc, char **argv) {
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");
 
-  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
+  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
 
   std::shared_ptr<PaddlePredictor> cls_predictor;
 
@@ -603,14 +589,11 @@ void rec(int argc, char **argv) {
   // TODO: support autolog
   if (strcmp(argv[9], "True") == 0) {
     AutoLogger autolog(rec_model_file, 
-                       0,
-                       0,
-                       0,
+                       runtime_device,
                        std::stoi(num_threads),
                        std::stoi(batchsize), 
                        "dynamic", 
                        precision, 
-                       power_mode,
                        time_info, 
                        cv_all_img_names.size());
     autolog.report();
@@ -0,0 +1,12 @@
+===========================lite_params===========================
+inference:./ocr_db_crnn det
+infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
+runtime_device:ARM_CPU
+--cpu_threads:1|4
+--det_batch_size:1
+--rec_batch_size:1
+--system_batch_size:1
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
+--config_dir:./config.txt
+--rec_dict_dir:./ppocr_keys_v1.txt
+--benchmark:True
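This new parameter file is consumed by the `test_lite_arm_cpu_cpp.sh` runner added at the end of this commit: each `key:value` line is read with `func_parser_value`, and `|`-separated values (models, thread counts) are expanded into one run per combination. A rough sketch of the command the runner assembles from these fields for one combination (illustrative only; the `_opt.nb` suffix and log naming come from the runner script shown below):

```shell
# model=ch_PP-OCRv2_det_infer, runtime_device=ARM_CPU, precision=FP32, cpu_threads=4, batch_size=1
./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 4 1 \
    ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True \
    > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_4.log 2>&1
```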
										
Binary file not shown. (Before: 290 KiB, After: 210 KiB)
Binary file not shown. (Before: 776 KiB, After: 169 KiB)
@@ -1,72 +0,0 @@
-# Lite inference functional test
-
-The main program for the Lite inference functional test is `test_lite.sh`, which tests model inference based on the Lite inference library.
-
-## 1. Summary of test conclusions
-
-The following combinations are currently supported for Lite-side samples:
-
-**Field descriptions:**
-- Input setting: C++ inference, Python inference, Java inference
-- Model type: normal model (FP32) and quantized model (FP16)
-- batch-size: 1 and 4
-- Number of predictors: multi-predictor and single-predictor inference
-- Power mode: high-performance mode (LITE_POWER_HIGH) and power-saving mode (LITE_POWER_LOW)
-- Inference library source: downloaded or built from source; builds cover the following target hardware: (1) ARM CPU; (2) Linux XPU; (3) OpenCL GPU; (4) Metal GPU
-
-| Model type | batch-size | Number of predictors | Power mode | Inference library source | Supported language |
-|  :----:   |  :----:   |  :----:  |  :----:  |  :----:  |  :----:  |
-| Normal model / quantized model | 1 | 1 | High-performance / power-saving mode | Download | C++ inference |
-
-
-## 2. Test workflow
-To set up the TIPC runtime environment, follow the [installation document](./install.md).
-
-### 2.1 Functional test
-
-First run `prepare.sh` to prepare the data and models; they are packed into test_lite.tar. Upload test_lite.tar to the phone, unpack it, enter the `test_lite` directory, then run `test_lite.sh`. Log files with the `lite_*.log` suffix are generated under the `test_lite/output` directory.
-
-```shell
-
-# Prepare data and models
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "lite_infer"
-
-# Test on the phone:
-bash test_lite.sh ppocr_det_mobile_params.txt
-
-```  
-
-**Note**: Because running this project requires bash and other commands, installation through the traditional adb approach does not work well. We therefore recommend connecting to the computer through a virtual terminal opened on the phone; see [Connecting an Android phone to a computer with termux](./termux_for_android.md).
-
-#### Results
-
-The status of each test run is printed under `./output/`:
-On success it prints:
-
-```
-Run successfully with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1!
-Run successfully with command xxx
-...
-```
-
-On failure it prints:
-
-```
-Run failed with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1!
-Run failed with command xxx
-...
-```
-
-The ./output/ directory contains one log per configuration:
-
-<img src="lite_log.png" width="1000">
-
-In each log, autolog is called to print information such as:
-
-<img src="lite_auto_log.png" width="1000">
-
-
-## 3. More tutorials
-
-This document is for functional testing only. For a more detailed tutorial on Lite inference see: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
test_tipc/docs/test_lite_arm_cpu_cpp.md (new file, 71 lines)
@@ -0,0 +1,71 @@
+# Lite\_arm\_cpu\_cpp inference functional test
+
+The main program for the Lite\_arm\_cpu\_cpp inference functional test is `test_lite_arm_cpu_cpp.sh`, which tests C++ model inference on an ARM CPU based on the Lite inference library.
+
+## 1. Summary of test conclusions
+
+The following combinations are currently supported for Lite-side samples:
+
+**Field descriptions:**
+- Model type: normal model (FP32) and quantized model (INT8)
+- batch-size: 1 and 4
+- threads: 1 and 4
+- Number of predictors: multi-predictor and single-predictor inference
+- Inference library source: downloaded or built from source
+
+| Model type | batch-size | threads | Number of predictors | Inference library source |
+|  :----:   |  :----:  | :----:  |  :----:  |  :----:  |
+| Normal model / quantized model | 1 | 1/4 |  1 | Download |
+
+
+## 2. Test workflow
+To set up the TIPC runtime environment, follow the [installation document](./install.md).
+
+### 2.1 Functional test
+
+First run `prepare_lite.sh`; it generates `test_lite.tar` in the current directory, containing the test data, test models, and the executable used for inference. Upload `test_lite.tar` to the phone under test, unpack it in the phone's terminal, enter the `test_lite` directory, then run `test_lite_arm_cpu_cpp.sh`. Log files with the `lite_*.log` suffix are generated under the `test_lite/output` directory.
+
+```shell
+
+# Prepare data and models
+bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
+
+# Test on the phone:
+bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
+
+```  
+
+**Note**: Because running this project requires bash and other commands, installation through the traditional adb approach does not work well. We therefore recommend connecting to the computer through a virtual terminal opened on the phone; see [Connecting an Android phone to a computer with termux](./termux_for_android.md).
+
+#### Results
+
+The status of each test run is printed under `./output/`:
+On success it prints:
+
+```
+Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1  ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
+Run successfully with command xxx
+...
+```
+
+On failure it prints:
+
+```
+Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1  ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
+Run failed with command xxx
+...
+```
+
+The ./output/ directory contains one log per configuration:
+
+<img src="lite_log.png" width="1000">
+
+In each log, autolog is called to print information such as:
+
+<img src="lite_auto_log.png" width="1000">
+
+
+
+## 3. More tutorials
+
+This document is for functional testing only. For a more detailed tutorial on Lite inference see: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
@@ -3,7 +3,7 @@ FILENAME=$1
 
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',  
 #                 'whole_infer', 'klquant_whole_infer',
-#                 'cpp_infer', 'serving_infer',  'lite_infer']
+#                 'cpp_infer', 'serving_infer']
 
 MODE=$2
 
@@ -34,7 +34,7 @@ trainer_list=$(func_parser_value "${lines[14]}")
 
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',  
 #                 'whole_infer', 'klquant_whole_infer',
-#                 'cpp_infer', 'serving_infer',  'lite_infer']
+#                 'cpp_infer', 'serving_infer']
 MODE=$2
 
 if [ ${MODE} = "lite_train_lite_infer" ];then
@@ -169,40 +169,6 @@ if [ ${MODE} = "serving_infer" ];then
     cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../
 fi
 
-
-if [ ${MODE} = "lite_infer" ];then    
-    # prepare lite nb model and test data
-    current_dir=${PWD}
-    wget -nc  -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb
-    wget -nc  -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb
-    wget -nc  -P ./test_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
-    cd ./test_data && tar -xf icdar2015_lite.tar && rm icdar2015_lite.tar && cd ../
-    # prepare lite env
-    export http_proxy=http://172.19.57.45:3128
-    export https_proxy=http://172.19.57.45:3128
-    paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
-    paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
-    paddlelite_file=${paddlelite_zipfile:0:66}
-    wget ${paddlelite_url}
-    tar -xf ${paddlelite_zipfile}
-    mkdir -p  ${paddlelite_file}/demo/cxx/ocr/test_lite
-    mv models test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
-    cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp test_tipc/configs/ppocr_det_mobile_params.txt test_tipc/test_lite.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cd ${paddlelite_file}/demo/cxx/ocr/
-    git clone https://github.com/LDOUBLEV/AutoLog.git
-    unset http_proxy
-    unset https_proxy
-    make -j
-    sleep 1
-    make -j
-    cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
-    tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
-fi
-
-
 if [ ${MODE} = "paddle2onnx_infer" ];then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
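The removed `lite_infer` branch is superseded by the standalone `test_tipc/prepare_lite.sh` added below, which is driven by the lite parameter file rather than a MODE argument. Per the new doc in this commit, the replacement workflow is:

```shell
# Prepare models, test data and binaries; packs everything into test_lite.tar
bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt

# Then, on the phone, after unpacking test_lite.tar:
bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```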
test_tipc/prepare_lite.sh (new file, 55 lines)
@@ -0,0 +1,55 @@
+#!/bin/bash
+source ./test_tipc/common_func.sh
+FILENAME=$1
+dataline=$(cat ${FILENAME})
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+IFS=$'\n'
+lite_model_list=$(func_parser_value "${lines[2]}")
+
+# prepare lite .nb model
+pip install paddlelite==2.9
+current_dir=${PWD}
+IFS="|"
+model_path=./inference_models
+for model in ${lite_model_list[*]}; do
+    inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
+    inference_model=${inference_model_url##*/}
+    wget -nc  -P ${model_path} ${inference_model_url}
+    cd ${model_path} && tar -xf ${inference_model} && cd ../
+    model_dir=${model_path}/${inference_model%.*}
+    model_file=${model_dir}/inference.pdmodel
+    param_file=${model_dir}/inference.pdiparams
+    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt
+done
+
+# prepare test data
+data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
+model_path=./inference_models
+inference_model=${inference_model_url##*/}
+data_file=${data_url##*/}
+wget -nc  -P ./inference_models ${inference_model_url}
+wget -nc  -P ./test_data ${data_url}
+cd ./inference_models && tar -xf ${inference_model} && cd ../
+cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../
+
+# prepare lite env
+paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
+paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
+paddlelite_file=${paddlelite_zipfile:0:66}
+wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
+mkdir -p  ${paddlelite_file}/demo/cxx/ocr/test_lite
+cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
+cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
+cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
+cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
+cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
+cd ${paddlelite_file}/demo/cxx/ocr/
+git clone https://github.com/cuicheng01/AutoLog.git
+make -j
+sleep 1
+make -j
+cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
+tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
+rm -rf ${paddlelite_file}* && rm -rf ${model_path}
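For a single entry of `lite_model_list`, the conversion loop in the new script boils down to the following steps, shown here unrolled for the first model in the parameter file (paths match the script; the unrolled example itself is only illustrative):

```shell
wget -nc -P ./inference_models https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar
cd ./inference_models && tar -xf ch_PP-OCRv2_det_infer.tar && cd ../
paddle_lite_opt --model_dir=./inference_models/ch_PP-OCRv2_det_infer \
                --model_file=./inference_models/ch_PP-OCRv2_det_infer/inference.pdmodel \
                --param_file=./inference_models/ch_PP-OCRv2_det_infer/inference.pdiparams \
                --valid_targets=arm \
                --optimize_out=./inference_models/ch_PP-OCRv2_det_infer_opt
# paddle_lite_opt writes ./inference_models/ch_PP-OCRv2_det_infer_opt.nb, the model file the runner loads
```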
@@ -80,7 +80,7 @@ test_tipc/
 ├── test_train_inference_python.sh    # main program for testing Python training and inference
 ├── test_inference_cpp.sh             # main program for testing C++ inference
 ├── test_serving.sh                   # main program for testing serving deployment inference
-├── test_lite.sh                      # main program for testing lite deployment inference
+├── test_lite_arm_cpu_cpp.sh          # main program for testing lite C++ inference deployed on arm_cpu
 ├── compare_results.py                # compares predictions in the logs against the pre-stored results to check the accuracy error is within bounds
 └── readme.md                         # usage documentation
 ```
@@ -107,4 +107,4 @@ test_tipc/
 [test_train_inference_python usage](docs/test_train_inference_python.md)  
 [test_inference_cpp usage](docs/test_inference_cpp.md)  
 [test_serving usage](docs/test_serving.md)  
-[test_lite usage](docs/test_lite.md)  
+[test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md)  
@@ -1,69 +0,0 @@
-#!/bin/bash
-source ./common_func.sh
-export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
-
-FILENAME=$1
-dataline=$(awk 'NR==102, NR==111{print}'  $FILENAME)
-echo $dataline
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-# parser lite inference
-lite_inference_cmd=$(func_parser_value "${lines[1]}")
-lite_model_dir_list=$(func_parser_value "${lines[2]}")
-lite_cpu_threads_list=$(func_parser_value "${lines[3]}")
-lite_batch_size_list=$(func_parser_value "${lines[4]}")
-lite_power_mode_list=$(func_parser_value "${lines[5]}")
-lite_infer_img_dir_list=$(func_parser_value "${lines[6]}")
-lite_config_dir=$(func_parser_value "${lines[7]}")
-lite_rec_dict_dir=$(func_parser_value "${lines[8]}")
-lite_benchmark_value=$(func_parser_value "${lines[9]}")
-
-LOG_PATH="./output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results.log"
-
-
-function func_lite(){
-    IFS='|'
-    _script=$1
-    _lite_model=$2
-    _log_path=$3
-    _img_dir=$4
-    _config=$5
-    if [[ $lite_model =~ "slim" ]]; then
-        precision="INT8"
-    else
-        precision="FP32"
-    fi
-    is_single_img=$(echo $_img_dir | grep -E ".jpg|.jpeg|.png|.JPEG|.JPG")
-    if [[ "$is_single_img" != "" ]]; then
-        single_img="True"
-    else
-        single_img="False"
-    fi
-
-    # lite inference
-    for num_threads in ${lite_cpu_threads_list[*]}; do
-        for power_mode in ${lite_power_mode_list[*]}; do
-            for batchsize in ${lite_batch_size_list[*]}; do
-                model_name=$(echo $lite_model | awk -F "/" '{print $NF}')
-                _save_log_path="${_log_path}/lite_${model_name}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}_powermode_${power_mode}_singleimg_${single_img}.log"
-                command="${_script} ${lite_model} ${precision} ${num_threads} ${batchsize} ${power_mode} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
-                eval ${command}
-                status_check $? "${command}" "${status_log}"
-            done
-        done
-    done
-}
-
-
-echo "################### run test ###################"
-IFS="|"
-for lite_model in ${lite_model_dir_list[*]}; do
-    #run lite inference
-    for img_dir in ${lite_infer_img_dir_list[*]}; do
-        func_lite "${lite_inference_cmd}" "${lite_model}" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
-    done
-done
test_tipc/test_lite_arm_cpu_cpp.sh (new file, 60 lines)
@@ -0,0 +1,60 @@
+#!/bin/bash
+source ./common_func.sh
+export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
+
+FILENAME=$1
+dataline=$(cat $FILENAME)
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+# parser lite inference
+lite_inference_cmd=$(func_parser_value "${lines[1]}")
+lite_model_dir_list=$(func_parser_value "${lines[2]}")
+runtime_device=$(func_parser_value "${lines[3]}")
+lite_cpu_threads_list=$(func_parser_value "${lines[4]}")
+lite_batch_size_list=$(func_parser_value "${lines[5]}")
+lite_infer_img_dir_list=$(func_parser_value "${lines[8]}")
+lite_config_dir=$(func_parser_value "${lines[9]}")
+lite_rec_dict_dir=$(func_parser_value "${lines[10]}")
+lite_benchmark_value=$(func_parser_value "${lines[11]}")
+
+
+LOG_PATH="./output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results.log"
+
+
+function func_lite(){
+    IFS='|'
+    _script=$1
+    _lite_model=$2
+    _log_path=$3
+    _img_dir=$4
+    _config=$5
+    if [[ $lite_model =~ "slim" ]]; then
+        precision="INT8"
+    else
+        precision="FP32"
+    fi
+
+    # lite inference
+    for num_threads in ${lite_cpu_threads_list[*]}; do
+        for batchsize in ${lite_batch_size_list[*]}; do
+            _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
+            command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize}  ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
+            eval ${command}
+            status_check $? "${command}" "${status_log}"
+        done
+    done
+}
+
+
+echo "################### run test ###################"
+IFS="|"
+for lite_model in ${lite_model_dir_list[*]}; do
+    #run lite inference
+    for img_dir in ${lite_infer_img_dir_list[*]}; do
+        func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
+    done
+done