Mirror of https://github.com/PaddlePaddle/PaddleOCR.git (synced 2025-11-08 13:54:21 +00:00)

Merge pull request #5536 from LDOUBLEV/dygraph
[benchmark] fix pretrain model download

Commit: 9c0a4d9d30

The diff below reformats the benchmark log-analysis Python script (line wrapping only, no behaviour change), drops the model_branch/model_commit exports from the test_tipc environment setup, and updates the benchmark_train preparation step so that pretrained models are downloaded from paddleocr.bj.bcebos.com, with dedicated downloads for the EAST/PSE and DB-R18 configurations.
@@ -26,35 +26,57 @@ def parse_args():
     parser.add_argument(
         "--filename", type=str, help="The name of log which need to analysis.")
     parser.add_argument(
-        "--log_with_profiler", type=str, help="The path of train log with profiler")
+        "--log_with_profiler",
+        type=str,
+        help="The path of train log with profiler")
     parser.add_argument(
         "--profiler_path", type=str, help="The path of profiler timeline log.")
     parser.add_argument(
         "--keyword", type=str, help="Keyword to specify analysis data")
     parser.add_argument(
-        "--separator", type=str, default=None, help="Separator of different field in log")
+        "--separator",
+        type=str,
+        default=None,
+        help="Separator of different field in log")
     parser.add_argument(
         '--position', type=int, default=None, help='The position of data field')
     parser.add_argument(
-        '--range', type=str, default="", help='The range of data field to intercept')
+        '--range',
+        type=str,
+        default="",
+        help='The range of data field to intercept')
     parser.add_argument(
         '--base_batch_size', type=int, help='base_batch size on gpu')
     parser.add_argument(
-        '--skip_steps', type=int, default=0, help='The number of steps to be skipped')
+        '--skip_steps',
+        type=int,
+        default=0,
+        help='The number of steps to be skipped')
     parser.add_argument(
-        '--model_mode', type=int, default=-1, help='Analysis mode, default value is -1')
+        '--model_mode',
+        type=int,
+        default=-1,
+        help='Analysis mode, default value is -1')
+    parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit')
     parser.add_argument(
-        '--ips_unit', type=str, default=None, help='IPS unit')
-    parser.add_argument(
-        '--model_name', type=str, default=0, help='training model_name, transformer_base')
+        '--model_name',
+        type=str,
+        default=0,
+        help='training model_name, transformer_base')
     parser.add_argument(
         '--mission_name', type=str, default=0, help='training mission name')
     parser.add_argument(
         '--direction_id', type=int, default=0, help='training direction_id')
     parser.add_argument(
-        '--run_mode', type=str, default="sp", help='multi process or single process')
+        '--run_mode',
+        type=str,
+        default="sp",
+        help='multi process or single process')
     parser.add_argument(
-        '--index', type=int, default=1, help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
+        '--index',
+        type=int,
+        default=1,
+        help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
     parser.add_argument(
         '--gpu_num', type=int, default=1, help='nums of training gpus')
     args = parser.parse_args()
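
For orientation, the block below is a minimal, self-contained sketch of how flags defined in the hunk above can be parsed and inspected. It repeats a few of the add_argument calls verbatim; the example values ("train.log", "ips", batch size 8) are invented for illustration and do not come from this commit.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str, help="The name of log which need to analysis.")
parser.add_argument("--keyword", type=str, help="Keyword to specify analysis data")
parser.add_argument("--base_batch_size", type=int, help="base_batch size on gpu")
parser.add_argument("--skip_steps", type=int, default=0, help="The number of steps to be skipped")
parser.add_argument("--run_mode", type=str, default="sp", help="multi process or single process")

# Invented example values, passed explicitly instead of reading sys.argv.
args = parser.parse_args([
    "--filename", "train.log",
    "--keyword", "ips",
    "--base_batch_size", "8",
    "--skip_steps", "2",
])
print(args.filename, args.keyword, args.base_batch_size, args.skip_steps)
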
@@ -72,7 +94,12 @@ def _is_number(num):
 
 
 class TimeAnalyzer(object):
-    def __init__(self, filename, keyword=None, separator=None, position=None, range="-1"):
+    def __init__(self,
+                 filename,
+                 keyword=None,
+                 separator=None,
+                 position=None,
+                 range="-1"):
         if filename is None:
             raise Exception("Please specify the filename!")
 
@@ -99,7 +126,8 @@ class TimeAnalyzer(object):
 
                     # Distil the string from a line.
                     line = line.strip()
-                    line_words = line.split(self.separator) if self.separator else line.split()
+                    line_words = line.split(
+                        self.separator) if self.separator else line.split()
                     if args.position:
                         result = line_words[self.position]
                     else:
@@ -108,27 +136,36 @@ class TimeAnalyzer(object):
                             if line_words[i] == self.keyword:
                                 result = line_words[i + 1]
                                 break
 
                     # Distil the result from the picked string.
                     if not self.range:
                         result = result[0:]
                     elif _is_number(self.range):
-                        result = result[0: int(self.range)]
+                        result = result[0:int(self.range)]
                     else:
-                        result = result[int(self.range.split(":")[0]): int(self.range.split(":")[1])]
+                        result = result[int(self.range.split(":")[0]):int(
+                            self.range.split(":")[1])]
                     self.records.append(float(result))
                 except Exception as exc:
-                    print("line is: {}; separator={}; position={}".format(line, self.separator, self.position))
+                    print("line is: {}; separator={}; position={}".format(
+                        line, self.separator, self.position))
 
-        print("Extract {} records: separator={}; position={}".format(len(self.records), self.separator, self.position))
+        print("Extract {} records: separator={}; position={}".format(
+            len(self.records), self.separator, self.position))
 
-    def _get_fps(self, mode, batch_size, gpu_num, avg_of_records, run_mode, unit=None):
+    def _get_fps(self,
+                 mode,
+                 batch_size,
+                 gpu_num,
+                 avg_of_records,
+                 run_mode,
+                 unit=None):
         if mode == -1 and run_mode == 'sp':
             assert unit, "Please set the unit when mode is -1."
             fps = gpu_num * avg_of_records
         elif mode == -1 and run_mode == 'mp':
             assert unit, "Please set the unit when mode is -1."
             fps = gpu_num * avg_of_records #temporarily, not used now
             print("------------this is mp")
         elif mode == 0:
             # s/step -> samples/s
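
To make the extraction logic above concrete, here is a standalone sketch of the keyword/position/range lookup it performs on a single log line. The helper name extract_value, the sample line, and the keyword "ips:" are made up for illustration; the slicing rules mirror the --range handling shown in the hunk.

def extract_value(line, keyword, separator=None, position=None, range_spec=""):
    # Split the line the same way the analyzer does, then pick the field
    # right after the keyword (or at an explicit position).
    words = line.split(separator) if separator else line.split()
    if position is not None:
        result = words[position]
    else:
        result = None
        for i in range(len(words) - 1):
            if words[i] == keyword:
                result = words[i + 1]
                break
    # Optionally slice the picked string, mirroring the --range option.
    if range_spec:
        if ":" in range_spec:
            start, end = range_spec.split(":")
            result = result[int(start):int(end)]
        else:
            result = result[:int(range_spec)]
    return float(result)

# Invented sample log line; "ips:" plays the role of --keyword.
print(extract_value("step 100 loss: 0.35 ips: 512.7 samples/sec", "ips:"))  # 512.7
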
@@ -155,12 +192,20 @@ class TimeAnalyzer(object):
 
         return fps, unit
 
-    def analysis(self, batch_size, gpu_num=1, skip_steps=0, mode=-1, run_mode='sp', unit=None):
+    def analysis(self,
+                 batch_size,
+                 gpu_num=1,
+                 skip_steps=0,
+                 mode=-1,
+                 run_mode='sp',
+                 unit=None):
         if batch_size <= 0:
             print("base_batch_size should larger than 0.")
             return 0, ''
 
-        if len(self.records) <= skip_steps:  # to address the condition which item of log equals to skip_steps
+        if len(
+                self.records
+        ) <= skip_steps:  # to address the condition which item of log equals to skip_steps
             print("no records")
             return 0, ''
 
@@ -180,16 +225,20 @@ class TimeAnalyzer(object):
                     skip_max = self.records[i]
 
         avg_of_records = sum_of_records / float(count)
-        avg_of_records_skipped = sum_of_records_skipped / float(count - skip_steps)
+        avg_of_records_skipped = sum_of_records_skipped / float(count -
+                                                                 skip_steps)
 
-        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records, run_mode, unit)
-        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num, avg_of_records_skipped, run_mode, unit)
+        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records,
+                                      run_mode, unit)
+        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num,
+                                       avg_of_records_skipped, run_mode, unit)
         if mode == -1:
             print("average ips of %d steps, skip 0 step:" % count)
             print("\tAvg: %.3f %s" % (avg_of_records, fps_unit))
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average ips of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average ips of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit))
                 print("\tMin: %.3f %s" % (skip_min, fps_unit))
                 print("\tMax: %.3f %s" % (skip_max, fps_unit))
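
A tiny numeric sketch of the skip-steps averaging and the mode == -1, run_mode == 'sp' throughput rule used above; the record values and gpu_num below are invented.

records = [100.0, 480.0, 500.0, 520.0]   # hypothetical per-step ips readings
skip_steps = 1
gpu_num = 1

avg_of_records = sum(records) / float(len(records))
avg_of_records_skipped = sum(records[skip_steps:]) / float(len(records) - skip_steps)
fps_skipped = gpu_num * avg_of_records_skipped   # same rule _get_fps applies for mode == -1, run_mode == 'sp'
print(avg_of_records, avg_of_records_skipped, fps_skipped)   # 400.0 500.0 500.0
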
@@ -199,7 +248,8 @@ class TimeAnalyzer(object):
             print("\tAvg: %.3f steps/s" % avg_of_records)
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average latency of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f steps/s" % avg_of_records_skipped)
                 print("\tMin: %.3f steps/s" % skip_min)
                 print("\tMax: %.3f steps/s" % skip_max)
@@ -209,7 +259,8 @@ class TimeAnalyzer(object):
             print("\tAvg: %.3f s/step" % avg_of_records)
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average latency of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f s/step" % avg_of_records_skipped)
                 print("\tMin: %.3f s/step" % skip_min)
                 print("\tMax: %.3f s/step" % skip_max)
@@ -236,7 +287,8 @@ if __name__ == "__main__":
             if args.gpu_num == 1:
                 run_info["log_with_profiler"] = args.log_with_profiler
                 run_info["profiler_path"] = args.profiler_path
-            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator, args.position, args.range)
+            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator,
+                                    args.position, args.range)
             run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis(
                 batch_size=args.base_batch_size,
                 gpu_num=args.gpu_num,
@@ -245,29 +297,50 @@ if __name__ == "__main__":
                 run_mode=args.run_mode,
                 unit=args.ips_unit)
             try:
-                if int(os.getenv('job_fail_flag')) == 1 or int(run_info["FINAL_RESULT"]) == 0:
+                if int(os.getenv('job_fail_flag')) == 1 or int(run_info[
+                        "FINAL_RESULT"]) == 0:
                     run_info["JOB_FAIL_FLAG"] = 1
             except:
                 pass
         elif args.index == 3:
             run_info["FINAL_RESULT"] = {}
-            records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead', None, 3, '').records
-            records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead', None, 5).records
-            records_ct_total = TimeAnalyzer(args.filename, 'Computation time', None, 3, '').records
-            records_gm_total = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 4, '').records
-            records_gm_ratio = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 6).records
-            records_gmas_total = TimeAnalyzer(args.filename, 'GpuMemcpyAsync Calls', None, 4, '').records
-            records_gms_total = TimeAnalyzer(args.filename, 'GpuMemcpySync Calls', None, 4, '').records
-            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[0] if records_fo_total else 0
-            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[0] if records_fo_ratio else 0
-            run_info["FINAL_RESULT"]["ComputationTime_Total"] = records_ct_total[0] if records_ct_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[0] if records_gm_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[0] if records_gm_ratio else 0
-            run_info["FINAL_RESULT"]["GpuMemcpyAsync_Total"] = records_gmas_total[0] if records_gmas_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[0] if records_gms_total else 0
+            records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead',
+                                            None, 3, '').records
+            records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead',
+                                            None, 5).records
+            records_ct_total = TimeAnalyzer(args.filename, 'Computation time',
+                                            None, 3, '').records
+            records_gm_total = TimeAnalyzer(args.filename,
+                                            'GpuMemcpy Calls',
+                                            None, 4, '').records
+            records_gm_ratio = TimeAnalyzer(args.filename,
+                                            'GpuMemcpy Calls',
+                                            None, 6).records
+            records_gmas_total = TimeAnalyzer(args.filename,
+                                              'GpuMemcpyAsync Calls',
+                                              None, 4, '').records
+            records_gms_total = TimeAnalyzer(args.filename,
+                                             'GpuMemcpySync Calls',
+                                             None, 4, '').records
+            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[
+                0] if records_fo_total else 0
+            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[
+                0] if records_fo_ratio else 0
+            run_info["FINAL_RESULT"][
+                "ComputationTime_Total"] = records_ct_total[
+                    0] if records_ct_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[
+                0] if records_gm_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[
+                0] if records_gm_ratio else 0
+            run_info["FINAL_RESULT"][
+                "GpuMemcpyAsync_Total"] = records_gmas_total[
+                    0] if records_gmas_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[
+                0] if records_gms_total else 0
        else:
            print("Not support!")
    except Exception:
        traceback.print_exc()
-    print("{}".format(json.dumps(run_info)))  # it's required, for the log file path insert to the database
+    print("{}".format(json.dumps(run_info))
+          )  # it's required, for the log file path insert to the database
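
The script's final line prints run_info as a single JSON object so downstream tooling can ingest it. The sketch below shows a plausible shape of that dictionary for the speed case (args.index == 1); only the key names come from this diff, while the concrete values and paths are invented.

import json

run_info = {
    "log_with_profiler": "./profiler.log",   # hypothetical path
    "profiler_path": "./timeline.json",      # hypothetical path
    "FINAL_RESULT": 512.7,                   # e.g. ips returned by analyzer.analysis()
    "UNIT": "samples/s",                     # hypothetical --ips_unit value
    "JOB_FAIL_FLAG": 0,
}
print("{}".format(json.dumps(run_info)))
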
@@ -58,3 +58,4 @@ source ${BENCHMARK_ROOT}/scripts/run_model.sh   # in this script, those conforming to …
 _set_params $@
 #_train       # uncomment if you only want to produce the training log without parsing it
 _run          # this function is defined in run_model.sh and calls _train when executed; if you only want the training log and are not running the full integration you can comment this line out, but it must be enabled when submitting
+
@@ -36,3 +36,4 @@ for model_mode in ${model_mode_list[@]}; do
 done
 
 
+
@@ -3,8 +3,6 @@ source test_tipc/common_func.sh
 
 # set env
 python=python
-export model_branch=`git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3`
-export model_commit=$(git log|head -n1|awk '{print $2}')
 export str_tmp=$(echo `pip list|grep paddlepaddle-gpu|awk -F ' ' '{print $2}'`)
 export frame_version=${str_tmp%%.post*}
 export frame_commit=$(echo `${python} -c "import paddle;print(paddle.version.commit)"`)
@@ -24,7 +24,17 @@ if [ ${MODE} = "benchmark_train" ];then
     pip install -r requirements.txt
     if [[ ${model_name} =~ "det_mv3_db_v2_0" || ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" || ${model_name} =~ "det_r18_db_v2_0" ]];then
         rm -rf ./train_data/icdar2015
-        wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
+    if [[ ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
+    if [[ ${model_name} =~ "det_r18_db_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate
         wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
         cd ./train_data/ && tar xf icdar2015.tar && cd ../
     fi
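
The hunk above is the substantive fix of this PR: the MobileNetV3 pretrained weights now come from paddleocr.bj.bcebos.com/pretrained/, and the EAST/PSE and DB-R18 configurations get their own ResNet50_vd_ssld / ResNet18_vd downloads. For completeness, here is a rough Python equivalent of one idempotent "wget -nc -P ./pretrain_models/ ..." line; the local directory layout is an assumption, and the URL is taken directly from the diff.

import os
import urllib.request

URL = ("https://paddleocr.bj.bcebos.com/pretrained/"
       "MobileNetV3_large_x0_5_pretrained.pdparams")
dest_dir = "./pretrain_models"
dest = os.path.join(dest_dir, os.path.basename(URL))

os.makedirs(dest_dir, exist_ok=True)
if not os.path.exists(dest):        # mirror wget's -nc (no-clobber) behaviour
    urllib.request.urlretrieve(URL, dest)
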