# game2text.py (forked from mathewthe2/Game2Text)
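# Game2Text entry point: starts the eel web UI in a background thread, loads the
# JMdict dictionary, sets up continuous and manual audio recording, monitors the
# clipboard, and registers global hotkeys for OCR refresh and escape.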
import eel
import threading, os, platform
from pathlib import Path
from ocr import detect_and_log
from translate import multi_translate
from hotkeys import refresh_ocr_hotkey, esc_hotkey
from util import RepeatedTimer, open_folder_by_relative_path, create_directory_if_not_exists
from audio import get_recommended_device_index, get_audio_objects
from recordaudio import RecordThread
from pynput import keyboard
from clipboard import clipboard_to_output, text_to_clipboard
from logger import get_time_string, AUDIO_LOG_PATH, SCRIPT_DIR
from ankiconnect import invoke, get_anki_models, update_anki_models, create_anki_note, fetch_anki_fields
from imageprofile import export_image_profile, load_image_profiles, open_image_profile
from dictionary import load_dictionary, look_up
from config import r_config, w_config, WINDOWS_HOTKEYS_CONFIG, APP_CONFIG, LOG_CONFIG
session_start_time = get_time_string()

# The web files folder and the file types checked for eel.expose() are configured
# in run_eel() below via eel.init(); the default allowed_extensions are
# ['.js', '.html', '.txt', '.htm', '.xhtml'].
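# Shut down the process once the last eel websocket client has disconnected.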
def close(page, sockets):
    if not sockets:
        os._exit(0)
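# eel-exposed endpoints: OCR and image filter profiles.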
@eel.expose
def recognize_image(engine, image, orientation):
    return detect_and_log(engine, image, orientation, session_start_time, get_time_string(), audio_recorder)

@eel.expose
def export_image_filter_profile(profile):
    return export_image_profile(profile)

@eel.expose
def load_image_filter_profiles():
    return load_image_profiles()

@eel.expose
def open_image_filter_profile():
    return open_image_profile()
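# eel-exposed endpoints: translation and clipboard monitoring.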
@eel.expose
def translate(text):
    return multi_translate(text)

@eel.expose
def monitor_clipboard():
    if clipboard_timer.is_running:
        clipboard_timer.stop()
    else:
        clipboard_timer.start()
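# eel-exposed endpoints: manual and continuous audio recording.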
@eel.expose
def start_manual_recording(request_time, session_start_time):
    global manual_audio_recorder
    global manual_audio_file_path
    if manual_audio_recorder.is_recording():
        stop_manual_recording()
    file_name = request_time + '.' + r_config(LOG_CONFIG, 'logaudiotype')
    manual_audio_file_path = str(Path(AUDIO_LOG_PATH, session_start_time, file_name))
    device_index = get_recommended_device_index(r_config(LOG_CONFIG, 'logaudiohost'))
    manual_audio_recorder = RecordThread(device_index, int(r_config(LOG_CONFIG, "logaudioframes")))
    manual_audio_recorder.start()

@eel.expose
def stop_manual_recording():
    if manual_audio_recorder.is_recording():
        create_directory_if_not_exists(manual_audio_file_path)
        manual_audio_recorder.stop_recording(manual_audio_file_path, -1)
        file_name = os.path.basename(manual_audio_file_path)
        return file_name
    return ''

@eel.expose
def restart_audio_recording(device_index):
    global audio_recorder
    audio_recorder = RecordThread(device_index, int(r_config(LOG_CONFIG, "logaudioframes")))
    audio_recorder.start()
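# eel-exposed endpoints: clipboard copy and config access.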
@eel.expose
def copy_text_to_clipboard(text):
    text_to_clipboard(text)

@eel.expose
def read_config(section, key):
    return r_config(section, key)

@eel.expose
def update_config(section, d):
    return w_config(section, d)
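# eel-exposed endpoints: AnkiConnect integration and dictionary lookup.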
@eel.expose
def invoke_anki(action, params={}):
    return invoke(action, params)

@eel.expose
def get_anki_card_models():
    return get_anki_models()

@eel.expose
def fetch_anki_fields_by_modals(model_names):
    fetch_anki_fields_thread = threading.Thread(target=fetch_anki_fields, args=(model_names,))
    fetch_anki_fields_thread.start()

@eel.expose
def update_anki_card_models(ankiModels):
    return update_anki_models(ankiModels)

@eel.expose
def create_note(note_data):
    return create_anki_note(note_data)

@eel.expose
def look_up_dictionary(word):
    return look_up(word)
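# eel-exposed endpoint: open a secondary browser window on an OS-assigned free port (port=0).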
@eel.expose
def open_new_window(html_file, height=800, width=600):
    eel.start(html_file,
              close_callback=close,
              mode=r_config(APP_CONFIG, "browser"),
              host=r_config(APP_CONFIG, "host"),
              size=(width, height),
              port=0)
    return

def run_eel():
    eel.init('web', allowed_extensions=['.js', '.html', '.map'])
    eel.start('index.html',
              close_callback=close,
              mode=r_config(APP_CONFIG, "browser"),
              host=r_config(APP_CONFIG, "host"),
              port=int(r_config(APP_CONFIG, "port")))
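# Run the eel web server/UI in a background thread so the main thread stays free
# for the global hotkey listener at the bottom of this file.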
main_thread = threading.Thread(target=run_eel, args=())
main_thread.start()

# Thread to load dictionaries
dictionary_path = str(Path(SCRIPT_DIR, 'dictionaries', 'jmdict_english.zip'))
dictionary_thread = threading.Thread(target=load_dictionary, args=(dictionary_path,))
dictionary_thread.start()

# Thread to record audio continuously
recommended_audio_device_index = get_recommended_device_index(r_config(LOG_CONFIG, 'logaudiohost'))
audio_recorder = RecordThread(recommended_audio_device_index, int(r_config(LOG_CONFIG, "logaudioframes")))
is_log_audio = r_config(LOG_CONFIG, "logaudio").lower() == "true"
if is_log_audio and recommended_audio_device_index != -1:
    audio_recorder.start()

# Thread to manually record audio
manual_audio_recorder = RecordThread(recommended_audio_device_index, int(r_config(LOG_CONFIG, "logaudioframes")))
manual_audio_file_path = ''

# Thread to export clipboard text continuously
clipboard_timer = RepeatedTimer(1, clipboard_to_output)
clipboard_timer.stop()  # stop the initial timer
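# Platform-specific global hotkey for refreshing OCR, plus an <esc> handler;
# the listener blocks the main thread for the lifetime of the app.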
refresh_hotkey_string = {
    "Linux": "<ctrl>+q",
    "Darwin": "<cmd>+b",
    "Windows": r_config(WINDOWS_HOTKEYS_CONFIG, "refresh")
}

with keyboard.GlobalHotKeys({
        refresh_hotkey_string[platform.system()]: refresh_ocr_hotkey,
        '<esc>': esc_hotkey}) as h:
    h.join()