Skip to content

Commit

Permalink
[CLEANUP] Make Hedy PEP8 compliant (hedyorg#3602)
Browse files Browse the repository at this point in the history
**Description**

Since hedyorg#3410 we introduced a git pre-commit hook to lint modified code. However, much of the Hedy codebase is still not PEP8 compliant, in this PR we'll change that by applying autopep8 to the whole codebase with the following parameters:

```
autopep8 --in-place --max-line-length 120  -aaa --jobs 0 [files]
```
A few changes have been made to the pre-commit hook:
* The max line length now is 120
* I excluded the max line check from utils.py, because the number dictionary is too long.

**Checklist**
Done? Check if you have it all in place using this list:
  
- [ ] Describes changes in the format above (present tense)
- [ ] Links to an existing issue or discussion 
- [ ] Has a "How to test" section

If you're unsure about any of these, don't hesitate to ask. We're here to help!
  • Loading branch information
jpelay authored Nov 25, 2022
1 parent a4e4d69 commit f11a97b
Show file tree
Hide file tree
Showing 95 changed files with 3,977 additions and 3,561 deletions.
3 changes: 1 addition & 2 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -102,10 +102,9 @@ bash feed_dev_database.sh
```

## Python code styling
As this project is growing and multiple people are working on it, we want to move to a more uniformly styled code base. We choose to stick to PEP8 guidelines, with the exception of a max line length of 100 characters instead of 79. To ensure your code adheres to these guidelines, you can install the pre-commit configuration to automatically check modified code when you make a commit. Installing this pre-commit hook has to be done manually (for security reasons) and can be done using the following commands:
As this project is growing and multiple people are working on it, we want to move to a more uniformly styled code base. We choose to stick to PEP8 guidelines, with the exception of a max line length of 120 characters instead of 79. To ensure your code adheres to these guidelines, you can install the pre-commit configuration to automatically check modified code when you make a commit. Installing this pre-commit hook has to be done manually (for security reasons) and can be done using the following commands. The pre-commit hook is available for installation once you run `requirements.txt`:

```
pip install pre-commit
pre-commit install
```

Expand Down
476 changes: 308 additions & 168 deletions app.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
athena_query = os.getenv('AWS_ATHENA_PREPARE_STATEMENT')

config = {
'port': os.getenv ('PORT') or 8080,
'port': os.getenv('PORT') or 8080,
'session': {
'cookie_name': 'hedy',
# in minutes
Expand Down Expand Up @@ -44,6 +44,6 @@
's3_output': 's3://hedy-query-outputs/',
'max_results': 50
},
#enables the quiz environment by setting the config variable on True
# enables the quiz environment by setting the config variable on True
'quiz-enabled': True,
}
16 changes: 8 additions & 8 deletions content/yaml_to_lark_utils.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
from pathlib import Path
import collections
import copy
import os
import yaml


def extract_Lark_grammar_from_yaml():
"""Creates a lark file in ../grammars/ for all yaml files located in ../content/keywords/.
If a keyword is not yet translated, it will use the English translation of the keyword
Args:
only_new_lang (bool, optional): Specifies if only a lark file should be created for a new keyword language or for all languages. Defaults to True.
only_new_lang (bool, optional): Specifies if only a lark file should be created for a new keyword language
or for all languages. Defaults to True.
"""
dirname = os.path.dirname(__file__)
input_path = os.path.join(dirname, 'keywords')
Expand Down Expand Up @@ -44,11 +45,11 @@ def extract_Lark_grammar_from_yaml():
translation_copy = copy.deepcopy(translations)
for k, v in translation_copy.items():
if yaml_lang == "ar":
mixed_tatweel_in = ''.join([' "ـ"* ' + '"'+l+'"' for l in v]) + ' "ـ"* '
translations[k] = mixed_tatweel_in
mixed_tatweel_in = ''.join([' "ـ"* ' + '"' + lang + '"' for lang in v]) + ' "ـ"* '
translations[k] = mixed_tatweel_in
else:
# other languages need their translations surrounded by "'s
translations[k] = '"' + v + '"'
# other languages need their translations surrounded by "'s
translations[k] = '"' + v + '"'

# we use | if we have multiple options, such as repete and repète
if "|" in v:
Expand All @@ -64,6 +65,5 @@ def extract_Lark_grammar_from_yaml():
with open(lark_filesname_with_path, 'w', encoding='utf-8') as f:
f.write(translated_template)

extract_Lark_grammar_from_yaml()


extract_Lark_grammar_from_yaml()
117 changes: 60 additions & 57 deletions docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,68 +3,71 @@
import re
import unicodedata


def slugify(s):
if s is None:
return None
return re.sub('[^a-zA-Z0-9]', '-', strip_accents(s)).lower()
if s is None:
return None
return re.sub('[^a-zA-Z0-9]', '-', strip_accents(s)).lower()


def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')


class DocCollection:
def __init__(self, keys=[], synth={}):
self.docs = []
self.index = {}
self.keys = keys
self.synth = synth

def get(self, *keys):
v = self.index
for key in keys:
v = v.get(key)
if not v:
return {} if len(keys) < len(self.keys) else None
return v

def load_dir(self, rootdir):
files = glob.glob(f'{rootdir}/**/*.md', recursive=True)
for file in sorted(files):
doc = MarkdownDoc.from_file(file)

for k, v in self.synth.items():
doc.front_matter[k] = v(doc)

self.docs.append(doc)
self.add_to_index(doc)

def add_to_index(self, doc):
if not self.keys:
return

d = self.index
for key in self.keys[:-1]:
value = doc.front_matter.get(key, None)
if not value:
return
d = d.setdefault(value, {})

key = self.keys[-1]
value = doc.front_matter.get(key, None)
if value:
d[value] = doc
def __init__(self, keys=[], synth={}):
self.docs = []
self.index = {}
self.keys = keys
self.synth = synth

def get(self, *keys):
v = self.index
for key in keys:
v = v.get(key)
if not v:
return {} if len(keys) < len(self.keys) else None
return v

def load_dir(self, rootdir):
files = glob.glob(f'{rootdir}/**/*.md', recursive=True)
for file in sorted(files):
doc = MarkdownDoc.from_file(file)

for k, v in self.synth.items():
doc.front_matter[k] = v(doc)

self.docs.append(doc)
self.add_to_index(doc)

def add_to_index(self, doc):
if not self.keys:
return

d = self.index
for key in self.keys[:-1]:
value = doc.front_matter.get(key, None)
if not value:
return
d = d.setdefault(value, {})

key = self.keys[-1]
value = doc.front_matter.get(key, None)
if value:
d[value] = doc


class MarkdownDoc:
@staticmethod
def from_file(filename):
with open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
parts = re.split('^---+$', contents, maxsplit=1, flags=re.M)
if len(parts) == 1:
return MarkdownDoc({}, parts[0])
return MarkdownDoc(yaml.safe_load(parts[0]), parts[1])

def __init__(self, front_matter, doc):
self.front_matter = front_matter
self.markdown = doc
@staticmethod
def from_file(filename):
with open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
parts = re.split('^---+$', contents, maxsplit=1, flags=re.M)
if len(parts) == 1:
return MarkdownDoc({}, parts[0])
return MarkdownDoc(yaml.safe_load(parts[0]), parts[1])

def __init__(self, front_matter, doc):
self.front_matter = front_matter
self.markdown = doc
Loading

0 comments on commit f11a97b

Please sign in to comment.