Optimize code
920232796 committed Aug 18, 2021
1 parent 728077f commit f510ca2
Showing 18 changed files with 7 additions and 113 deletions.
12 changes: 1 addition & 11 deletions test/auto_title_test.py
@@ -1,14 +1,4 @@
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import json
import time
import bert_seq2seq
import torch
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

15 changes: 2 additions & 13 deletions test/bert_english_autotitle_test.py
@@ -1,26 +1,15 @@
## Test file for English automatic summarization
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import torch
import glob
import json
import time
import bert_seq2seq
from rouge import Rouge
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
word2idx = tokenizer.get_vocab()
auto_title_model = "./state_dict/bert_english_auto_title_model.bin"
# device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
maxlen = 256

if __name__ == "__main__":
6 changes: 0 additions & 6 deletions test/english_t5_test.py
@@ -1,12 +1,6 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
from bert_seq2seq.tokenizer import load_chinese_base_vocab
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

import os
from bert_seq2seq.extend_model_method import ExtendModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
2 changes: 0 additions & 2 deletions test/gpt_ancient_translation_test.py
@@ -1,5 +1,3 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
2 changes: 0 additions & 2 deletions test/gpt_article_continued_test.py
@@ -1,5 +1,3 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
4 changes: 0 additions & 4 deletions test/gpt_english_story_test.py
@@ -1,11 +1,7 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
from bert_seq2seq.tokenizer import load_chinese_base_vocab
from transformers import AutoTokenizer
import os

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

2 changes: 0 additions & 2 deletions test/gpt_explain_dream_test.py
@@ -1,5 +1,3 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
4 changes: 0 additions & 4 deletions test/gpt_test_english.py
@@ -1,11 +1,7 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")

import torch
from bert_seq2seq.utils import load_gpt
from bert_seq2seq.tokenizer import load_chinese_base_vocab
from transformers import AutoTokenizer
import os

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

12 changes: 1 addition & 11 deletions test/nezha_auto_title_test.py
@@ -1,14 +1,4 @@
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import json
import time
import bert_seq2seq
import torch
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

8 changes: 0 additions & 8 deletions test/nezha_relation_extract_test.py
@@ -1,14 +1,6 @@
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

12 changes: 1 addition & 11 deletions test/poem_test.py
@@ -1,14 +1,4 @@
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import json
import time
import bert_seq2seq
import torch
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

10 changes: 1 addition & 9 deletions test/relation_extract_test.py
@@ -1,22 +1,14 @@
import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd

import numpy as np
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

relation_extrac_model = "./state_dict/bert_model_relation_extrac.bin"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
# model_path = "./state_dict/bert-base-chinese-pytorch_model.bin" # roberta模型位
# 加载字典
word2idx = load_chinese_base_vocab(vocab_path, simplfied=False)
tokenizer = Tokenizer(word2idx)
2 changes: 0 additions & 2 deletions test/semantic_matching_test.py
@@ -1,6 +1,4 @@
import torch
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert

2 changes: 0 additions & 2 deletions test/t5_chinese_test.py
@@ -1,5 +1,3 @@
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
import torch
from bert_seq2seq.tokenizer import load_chinese_base_vocab, T5PegasusTokenizer
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration
6 changes: 0 additions & 6 deletions test/做数学题_test.py
@@ -1,11 +1,5 @@
import torch
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.utils import load_bert

vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
6 changes: 0 additions & 6 deletions test/新闻标题文本分类_test.py
@@ -1,11 +1,5 @@
import torch
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.utils import load_bert

target = ["财经", "彩票", "房产", "股票", "家居", "教育", "科技", "社会", "时尚", "时政", "体育", "星座", "游戏", "娱乐"]
6 changes: 0 additions & 6 deletions test/粗粒度ner_test.py
@@ -1,11 +1,5 @@
import torch
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.utils import load_bert

target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"]
9 changes: 1 addition & 8 deletions test/细粒度ner_test.py
@@ -1,11 +1,5 @@
import torch
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.utils import load_bert

target = ["other", "address", "book", "company", "game", "government", "movie", "name", "organization", "position", "scene"]
@@ -34,8 +28,7 @@ def viterbi_decode(nodes, trans):
M = scores + trans + nodes[l].view(1, -1)
scores, ids = M.max(0)
path = torch.cat((path[:, ids], labels), dim=0)
# print(scores)
# print(scores)

return path[:, scores.argmax()]

def ner_print(model, test_data, device="cpu"):
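Note on the hunk above: viterbi_decode performs Viterbi decoding over per-token NER tag scores. A minimal, self-contained sketch of the same dynamic program is shown below; it assumes nodes holds emission scores of shape (seq_len, num_tags) and trans holds tag-to-tag transition scores of shape (num_tags, num_tags), and the name viterbi_decode_sketch is illustrative rather than taken from the repository.

import torch

def viterbi_decode_sketch(nodes, trans):
    # nodes: (seq_len, num_tags) emission scores; trans: (num_tags, num_tags) transition scores
    seq_len, num_tags = nodes.shape
    labels = torch.arange(num_tags).view(1, -1)          # (1, num_tags)
    scores = nodes[0].view(-1, 1)                         # best score so far, per ending tag
    path = labels.clone()                                 # path[t, j]: tag at step t on the best path ending in tag j
    for t in range(1, seq_len):
        M = scores + trans + nodes[t].view(1, -1)         # extend every previous tag to every current tag
        scores, ids = M.max(0)                            # keep the best previous tag for each current tag
        scores = scores.view(-1, 1)
        path = torch.cat((path[:, ids], labels), dim=0)   # reorder surviving prefixes, append current tags
    return path[:, scores.view(-1).argmax()]              # highest-scoring full tag sequence

# Example usage (random scores for 5 tokens and 7 tags):
# best_path = viterbi_decode_sketch(torch.randn(5, 7), torch.randn(7, 7))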
