forked from RasaHQ/rasa
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
added pep8 test and modified source to honor pep8
- Loading branch information
Showing
25 changed files
with
263 additions
and
254 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,61 +1,59 @@ | ||
|
||
|
||
def test_luis_request():
    """A LUIS-style query {"q": [text]} normalises to the internal {"text": text} form."""
    from rasa_nlu.emulators.luis import LUISEmulator
    em = LUISEmulator()
    norm = em.normalise_request_json({"q": ["arb text"]})
    assert norm == {"text": "arb text"}
|
||
|
||
def test_luis_response():
    """An internal parse result is wrapped in LUIS's response schema."""
    from rasa_nlu.emulators.luis import LUISEmulator
    em = LUISEmulator()
    data = {"text": "I want italian food", "intent": "inform", "entities": {"cuisine": "italian"}}
    norm = em.normalise_response_json(data)
    # NOTE(review): the expected "entities" below mirror the emulator's own
    # comprehension (iterating the entities dict yields its keys), which makes
    # this part of the assertion tautological — confirm the intended schema.
    assert norm == {
        "query": data["text"],
        "topScoringIntent": {
            "intent": "inform",
            "score": None
        },
        "entities": [
            {
                "entity": e[0],
                "type": e[1],
                "startIndex": None,
                "endIndex": None,
                "score": None
            } for e in data["entities"]
        ]
    }
|
||
|
||
def test_wit_request():
    """A wit.ai-style query {"q": [text]} normalises to the internal {"text": text} form."""
    from rasa_nlu.emulators.wit import WitEmulator
    em = WitEmulator()
    norm = em.normalise_request_json({"q": ["arb text"]})
    assert norm == {"text": "arb text"}
|
||
|
||
def test_wit_response():
    """An internal parse result is wrapped in wit.ai's response schema."""
    from rasa_nlu.emulators.wit import WitEmulator
    em = WitEmulator()
    data = {"text": "I want italian food", "intent": "inform", "entities": {"cuisine": "italian"}}
    norm = em.normalise_response_json(data)
    assert norm == [
        {'entities': {'cuisine': {'confidence': None, 'type': 'value', 'value': 'italian'}}, 'confidence': None,
         'intent': 'inform', '_text': 'I want italian food'}]
|
||
|
||
def test_dummy_request():
    """The pass-through emulator only unwraps single-element list values."""
    from rasa_nlu.emulators import NoEmulator
    em = NoEmulator()
    norm = em.normalise_request_json({"text": ["arb text"]})
    assert norm == {"text": "arb text"}
|
||
|
||
def test_dummy_response():
    """The pass-through emulator returns responses unchanged."""
    from rasa_nlu.emulators import NoEmulator
    em = NoEmulator()
    data = {"intent": "greet", "text": "hi", "entities": {}}
    assert em.normalise_response_json(data) == data
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,5 +4,4 @@ spacy==1.2.0 | |
scikit-learn | ||
pathlib | ||
cloudpickle | ||
|
||
|
||
pytest-pep8 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
# pytest PEP8 configuration | ||
[tool:pytest] | ||
pep8maxlinelength = 120 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,14 +1,12 @@ | ||
|
||
|
||
class NoEmulator(object):
    """Pass-through emulator: leaves request and response payloads unchanged."""

    def __init__(self):
        # No backing service is emulated.
        self.service = None

    def normalise_request_json(self, data):
        """Unwrap list values in-place to their first element and return `data`.

        Query-string style payloads arrive as {"text": ["..."]}; this flattens
        them to {"text": "..."}. Note an empty-list value would raise IndexError.
        """
        # .items() replaces py2-only .iteritems(); rebinding existing keys
        # while iterating the view is safe (no keys are added or removed).
        for key, val in data.items():
            if isinstance(val, list):
                data[key] = val[0]
        return data

    def normalise_response_json(self, data):
        """Return the response untouched."""
        return data
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,30 +1,26 @@ | ||
|
||
|
||
class LUISEmulator(object):
    """Translates between LUIS-style request/response JSON and the internal format."""

    def __init__(self):
        self.name = 'luis'

    def normalise_request_json(self, data):
        """Map a LUIS query ({"q": [text]}) to the internal {"text": text} form."""
        _data = {}
        _data["text"] = data['q'][0]
        return _data

    def normalise_response_json(self, data):
        """Wrap an internal parse result in LUIS's response schema.

        NOTE(review): "intent" is hard-coded to "inform" instead of using
        data["intent"] — verify against callers. Also, iterating
        data["entities"] (a dict) yields its KEYS, so e[0]/e[1] are the first
        two characters of each key; the existing tests depend on exactly this
        behaviour, so it is preserved here, but it looks like a bug — confirm.
        """
        return {
            "query": data["text"],
            "topScoringIntent": {
                "intent": "inform",
                "score": None
            },
            "entities": [
                {
                    "entity": e[0],
                    "type": e[1],
                    "startIndex": None,
                    "endIndex": None,
                    "score": None
                } for e in data["entities"]
            ]
        }
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,20 +1,20 @@ | ||
|
||
class WitEmulator(object):
    """Translates between wit.ai-style request/response JSON and the internal format."""

    def __init__(self):
        self.name = 'wit'

    def normalise_request_json(self, data):
        """Map a wit.ai query ({"q": [text]}) to the internal {"text": text} form."""
        _data = {}
        _data["text"] = data['q'][0]
        return _data

    def normalise_response_json(self, data):
        """Wrap an internal parse result in wit.ai's response schema.

        Each entity value becomes a {"confidence", "type", "value"} record;
        confidence is not available internally, so it is reported as None.
        """
        print('plain response {0}'.format(data))
        return [
            {
                "_text": data["text"],
                "confidence": None,
                "intent": data["intent"],
                "entities": {key: {"confidence": None, "type": "value", "value": val} for key, val in
                             data["entities"].items()}
            }
        ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,17 +1,18 @@ | ||
from mitie import * | ||
import numpy as np | ||
|
||
|
||
class MITIEFeaturizer(object):
    """Bag-of-words sentence featurizer backed by a MITIE word feature extractor."""

    def __init__(self, fe_file):
        # fe_file: path to a MITIE total_word_feature_extractor data file.
        # total_word_feature_extractor comes from `from mitie import *`.
        self.feature_extractor = total_word_feature_extractor(fe_file)
        self.ndim = self.feature_extractor.num_dimensions

    def create_bow_vecs(self, sentences):
        """Return an (n_sentences, ndim) array; row i is the mean MITIE token vector of sentence i.

        NOTE(review): a sentence that tokenizes to zero tokens divides by
        zero — confirm callers never pass empty strings.
        """
        X = np.zeros((len(sentences), self.ndim))
        for idx, sent in enumerate(sentences):
            tokens = tokenize(sent)  # MITIE tokenizer (star-imported)
            vec = np.zeros(self.ndim)
            for token in tokens:
                vec += self.feature_extractor.get_feature_vector(token)
            X[idx, :] = vec / len(tokens)
        return X
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,17 +1,18 @@ | ||
import spacy | ||
import numpy as np | ||
|
||
|
||
class SpacyFeaturizer(object):
    """Bag-of-words sentence featurizer using spaCy token vectors."""

    def __init__(self, nlp):
        # nlp: a callable spaCy pipeline (text -> iterable of tokens with .vector).
        self.nlp = nlp
        # Fixed to 300 to match spaCy's standard word-vector dimensionality.
        self.ndim = 300

    def create_bow_vecs(self, sentences):
        """Return an (n_sentences, ndim) array; row i is the mean token vector of sentence i.

        NOTE(review): a sentence whose doc has zero tokens divides by zero —
        confirm callers never pass empty strings.
        """
        X = np.zeros((len(sentences), self.ndim))
        for idx, sentence in enumerate(sentences):
            doc = self.nlp(sentence)
            vec = np.zeros(self.ndim)
            for token in doc:
                vec += token.vector
            X[idx, :] = vec / len(doc)
        return X
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,27 +1,27 @@ | ||
from mitie import * | ||
from rasa_nlu import Interpreter | ||
|
||
|
||
class MITIEInterpreter(Interpreter):
    """Interpreter combining a MITIE named-entity extractor with a MITIE intent classifier."""

    def __init__(self, intent_classifier=None, entity_extractor=None, feature_extractor=None, **kwargs):
        # Model file paths; named_entity_extractor / text_categorizer come
        # from `from mitie import *`.
        self.extractor = named_entity_extractor(entity_extractor, feature_extractor)
        self.classifier = text_categorizer(intent_classifier, feature_extractor)

    def get_entities(self, tokens):
        """Map each extracted entity label to the space-joined token span it covers."""
        d = {}
        entities = self.extractor.extract_entities(tokens)
        for e in entities:
            # e is (token-index range, label); join the covered tokens back into text.
            _range = e[0]
            d[e[1]] = " ".join(tokens[i] for i in _range)
        return d

    def get_intent(self, tokens):
        """Return the classifier's top intent label for the token sequence."""
        label, _ = self.classifier(tokens)  # don't use the score
        return label

    def parse(self, text):
        """Tokenize `text` with MITIE and return its text, intent, and entities."""
        tokens = tokenize(text)
        intent = self.get_intent(tokens)
        entities = self.get_entities(tokens)

        return {'text': text, 'intent': intent, 'entities': entities}
Oops, something went wrong.