# utility.py (forked from PaddlePaddle/PaddleOCR)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from PIL import Image
import numpy as np
from tools.infer.utility import draw_ocr_box_txt, str2bool, init_args as infer_args


def init_args():
    parser = infer_args()
    # params for output
    parser.add_argument("--output", type=str, default='./output')
    # params for table structure
    parser.add_argument("--table_max_len", type=int, default=488)
    parser.add_argument("--table_model_dir", type=str)
    parser.add_argument(
        "--table_char_dict_path",
        type=str,
        default="../ppocr/utils/dict/table_structure_dict.txt")
    # params for layout
    parser.add_argument(
        "--layout_path_model",
        type=str,
        default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config")
    parser.add_argument(
        "--layout_label_map",
        type=ast.literal_eval,
        default=None,
        help='label map according to ppstructure/layout/README_ch.md')
    # params for inference
    parser.add_argument(
        "--mode",
        type=str,
        default='structure',
        help="supported modes are 'structure' and 'vqa'")
    parser.add_argument(
        "--layout",
        type=str2bool,
        default=True,
        help='whether to enable layout analysis')
    parser.add_argument(
        "--table",
        type=str2bool,
        default=True,
        help='whether table regions use table recognition in the forward pass')
    parser.add_argument(
        "--ocr",
        type=str2bool,
        default=True,
        help='whether non-table regions are recognized by OCR in the forward pass')
    return parser
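
# Illustrative command line for the flags defined above. The entry-script name
# and model directory are placeholders/assumptions, shown only to sketch how
# these arguments are typically combined:
#   python3 predict_system.py \
#       --table_model_dir=./inference/table_model \
#       --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt \
#       --mode=structure --layout=true --table=true --ocr=true \
#       --output=./output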


def parse_args():
    parser = init_args()
    return parser.parse_args()


def draw_structure_result(image, result, font_path):
    """Visualize detected text regions of a structure result.

    Table regions are skipped here; only the OCR results of non-table
    regions are drawn with draw_ocr_box_txt.
    """
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    boxes, txts, scores = [], [], []
    for region in result:
        # table regions are rendered separately, so skip them here
        if region['type'] == 'Table':
            continue
        for text_result in region['res']:
            boxes.append(np.array(text_result['text_region']))
            txts.append(text_result['text'])
            scores.append(text_result['confidence'])
    im_show = draw_ocr_box_txt(
        image, boxes, txts, scores, font_path=font_path, drop_score=0)
    return im_show
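

# Usage sketch (not part of the upstream module): how these helpers are
# typically combined. The `result` layout (keys 'type', 'res', 'text_region',
# 'text', 'confidence') is inferred from draw_structure_result above; the
# image path, font path, and pipeline call are placeholders/assumptions.
#
#   args = parse_args()
#   image = np.array(Image.open('demo.jpg').convert('RGB'))
#   result = run_structure_pipeline(image, args)   # hypothetical pipeline call
#   vis = draw_structure_result(image, result, font_path=args.vis_font_path)
#   Image.fromarray(vis).save('structure_vis.jpg')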