-
Notifications
You must be signed in to change notification settings - Fork 1
/
poke_profile.py
108 lines (98 loc) · 3.13 KB
/
poke_profile.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# import the necessary packages
from PIL import Image
import pytesseract
import cv2
import numpy as np
from skimage.feature import match_template
from fuzzywuzzy import fuzz
# Module-level state shared between getLevel()/_parseLevel() and the
# standalone plotting code in the __main__ block below.
orig_gray = None  # grayscale version of the last screenshot processed by getLevel()
resultImage = None  # match_template correlation map from the last _parseLevel() call
x = None  # x coordinate of the best template match (set by _parseLevel)
y = None  # y coordinate of the best template match (set by _parseLevel)
levelarea = None  # cropped grayscale region expected to contain the level digits
def _parseLevel(gray, template):
    """Locate *template* in *gray* via normalized cross-correlation and
    OCR the digits found in a region around the best match.

    Side effects: stores the correlation map, the match coordinates and
    the cropped region in the module globals ``resultImage``, ``x``, ``y``
    and ``levelarea`` so the standalone ``__main__`` plot code can show them.

    Parameters:
        gray: 2-D grayscale image (numpy array).
        template: 2-D grayscale template to search for.

    Returns:
        List of integers OCR'd from the crop (empty when none recognized).
    """
    global resultImage, x, y, levelarea
    # Correlation map; its argmax is the best match position.
    resultImage = match_template(gray, template)
    ij = np.unravel_index(np.argmax(resultImage), resultImage.shape)
    x, y = ij[::-1]
    # Crop a region around the match, sized relative to the image width.
    height, width = gray.shape
    # BUG FIX: negative slice bounds wrap around in numpy and would yield
    # an empty/garbage crop when the match sits near the top or left
    # edge of the image — clamp the lower bounds to 0.
    top = max(0, y - int(0.1 * width))
    left = max(0, x - int(0.03 * width))
    levelarea = gray[top:y + int(0.05 * width), left:x + int(0.12 * width)]
    # OCR the crop and keep only the whitespace-separated integer tokens.
    level = pytesseract.image_to_string(levelarea)
    return [int(s) for s in level.split() if s.isdigit()]
def getLevel(imagepath, username):
    """OCR a profile screenshot for the trainer level and the username.

    Parameters:
        imagepath: path to the screenshot image file.
        username: name to fuzzy-match against the OCR'd screenshot text.

    Returns:
        ``[level, found]`` where ``level`` is the parsed level (``None``
        when no level could be read) and ``found`` is True when
        *username* matches the screenshot text with a partial ratio of
        at least 80.  Returns ``None`` when the screenshot (or a
        template file) cannot be read.
    """
    result = [0, False]
    # load the example image and convert it to grayscale
    image = cv2.imread(imagepath)
    if image is None:
        print("Error while reading file")
        return None
    template_ios = cv2.imread("template_ios.PNG")
    template_android = cv2.imread("template_android.PNG")
    # BUG FIX: guard against missing/unreadable template files —
    # cv2.cvtColor raises on a None input.
    if template_ios is None or template_android is None:
        print("Error while reading file")
        return None
    # BUG FIX: cv2.imread returns BGR, so the correct conversion flag is
    # COLOR_BGR2GRAY; COLOR_RGB2GRAY swaps the R/B luminance weights and
    # skews the grayscale values fed to the OCR.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    global orig_gray
    orig_gray = gray  # kept for the standalone plotting code
    gray_template_ios = cv2.cvtColor(template_ios, cv2.COLOR_BGR2GRAY)
    gray_template_android = cv2.cvtColor(template_android, cv2.COLOR_BGR2GRAY)
    # Try the Android template first; fall back to the iOS template.
    reqLevel = _parseLevel(gray, gray_template_android)
    if not reqLevel:
        reqLevel = _parseLevel(gray, gray_template_ios)
    result[0] = reqLevel[0] if reqLevel else None
    # apply thresholding (Otsu) to preprocess the image for full-page OCR
    gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # use tesseract to parse image to text
    text = pytesseract.image_to_string(gray)
    clearText = ''.join(e for e in text if e.isalnum())
    # fuzzy match username to text from image: at least 80% partial match
    result[1] = fuzz.partial_ratio(username, clearText) >= 80
    return result
# Standalone entry point: parse CLI arguments, run the OCR pipeline and
# optionally visualize the intermediate results.
if __name__ == '__main__':
    import argparse
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image", required=True,
                        help="path to input image to be OCR'd")
    parser.add_argument("-u", "--username", required=True,
                        help="username to search in OCR output")
    parser.add_argument("-p", "--plot", type=str, default="false",
                        help="true, to display plot, false else")
    opts = vars(parser.parse_args())

    result = getLevel(opts["image"], opts["username"])
    if result is not None:
        print("Level Found: " + str(result[0]))
        if result[1]:
            print("Username found: " + opts["username"])
        else:
            print("No Username found")
        # Show the grayscale screenshot, the template-match correlation
        # map and the cropped level region side by side.
        if opts["plot"] == "true":
            fig = plt.figure(figsize=(8, 3))
            ax_img = plt.subplot(1, 3, 1)
            ax_crop = plt.subplot(1, 3, 3)
            ax_match = plt.subplot(1, 3, 2, sharex=ax_crop, sharey=ax_crop)
            ax_img.imshow(orig_gray)
            ax_crop.imshow(levelarea)
            ax_match.imshow(resultImage)
            ax_match.set_axis_off()
            ax_match.set_title('`match_template`\nresult')
            # mark the best-match location on the correlation map
            ax_match.autoscale(False)
            ax_match.plot(x, y, 'o', markeredgecolor='r',
                          markerfacecolor='none', markersize=10)
            plt.show()