forked from Just-Some-Bots/MusicBot
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutils.py
85 lines (63 loc) · 2.27 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import re
import aiohttp
import decimal
import unicodedata
from hashlib import md5
from .constants import DISCORD_MSG_CHAR_LIMIT
def load_file(filename, skip_commented_lines=True, comment_char='#'):
    """Read a UTF-8 text file and return its non-empty lines as a list.

    Each line is stripped of surrounding whitespace; blank lines are
    dropped.  Lines beginning with *comment_char* are skipped unless
    *skip_commented_lines* is False.  If the file cannot be opened, an
    error is printed and an empty list is returned.
    """
    try:
        with open(filename, encoding='utf8') as src:
            stripped = (raw.strip() for raw in src)
            if skip_commented_lines:
                return [ln for ln in stripped
                        if ln and not ln.startswith(comment_char)]
            return [ln for ln in stripped if ln]
    except IOError as err:
        print("Error loading", filename, err)
        return []
def write_file(filename, contents):
    """Write every item of *contents* to *filename*, one per line (UTF-8).

    Items are converted with str(); the file is truncated first and each
    item is followed by a newline.
    """
    with open(filename, 'w', encoding='utf8') as dest:
        dest.writelines('%s\n' % item for item in contents)
def slugify(value):
    """Return a filesystem/URL-safe slug of *value*.

    The string is NFKD-normalized with non-ASCII characters dropped, then
    every character that is not alphanumeric, underscore, whitespace, or a
    hyphen is removed, and runs of whitespace/hyphens collapse to a single
    hyphen.  The result is lowercase.
    """
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    # Raw strings here: '[^\w\s-]' as a plain literal contains invalid
    # escape sequences, which newer CPython warns on (and will reject).
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)
def sane_round_int(x):
    """Round *x* to the nearest integer, with ties going away from zero.

    Unlike the builtin round() (banker's rounding), 2.5 -> 3 and -2.5 -> -3.
    """
    quantized = decimal.Decimal(x).quantize(
        decimal.Decimal(1), rounding=decimal.ROUND_HALF_UP)
    return int(quantized)
def paginate(content, *, length=DISCORD_MSG_CHAR_LIMIT, reserve=0):
    """
    Split up a large string or list of strings into chunks for sending to discord.

    :param content: a str (split on newlines) or a list of line strings.
    :param length: maximum chunk size (defaults to Discord's message limit).
    :param reserve: characters to leave free in each chunk.
    :returns: list of chunk strings, each line newline-terminated.
    :raises ValueError: if *content* is neither a str nor a list.
    """
    if isinstance(content, str):
        contentlist = content.split('\n')
    elif isinstance(content, list):
        contentlist = content
    else:
        raise ValueError("Content must be str or list, not %s" % type(content))

    chunks = []
    currentchunk = ''
    for line in contentlist:
        if len(currentchunk) + len(line) < length - reserve:
            currentchunk += line + '\n'
        else:
            if currentchunk:
                chunks.append(currentchunk)
            # BUG FIX: the original reset currentchunk to '' here, silently
            # dropping the line that overflowed the chunk.  Start the next
            # chunk with that line instead so no content is lost.
            currentchunk = line + '\n'
    if currentchunk:
        chunks.append(currentchunk)
    return chunks
async def get_header(session, url, headerfield=None, *, timeout=5):
    # Issue a HEAD request for *url* on the given aiohttp client session.
    # Returns the value of *headerfield* (None if the header is absent)
    # when a field name is given, otherwise the full response headers
    # mapping.  The request is bounded by *timeout* seconds.
    # NOTE(review): aiohttp.Timeout was deprecated and removed in
    # aiohttp 3.x — confirm the pinned aiohttp version, or migrate to
    # ClientSession timeouts / asyncio-based timeouts.
    with aiohttp.Timeout(timeout):
        async with session.head(url) as response:
            if headerfield:
                return response.headers.get(headerfield)
            else:
                return response.headers
def md5sum(filename, limit=0):
    """Return the hex MD5 digest of the file at *filename*.

    If *limit* is non-zero, only the last *limit* hex characters are
    returned; the default of 0 yields the full 32-character digest
    (since ``[-0:]`` slices the whole string).  The file is read in
    8 KiB blocks so large files are not loaded into memory at once.
    """
    digest = md5()
    with open(filename, "rb") as stream:
        while True:
            block = stream.read(8192)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()[-limit:]