Commit ab50b90
🎨 reformat codes
AlongWY committed Sep 18, 2022
1 parent 662702e commit ab50b90
Showing 30 changed files with 90 additions and 63 deletions.
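Every change below follows one pattern: first-party imports ("ltp_core", "tests") are pulled out of the third-party import block and given their own section, separated by a blank line, per the standard grouping of standard library, then third-party, then first-party imports. A minimal sketch of the convention, built from imports that appear in the diff below (the exact formatter and configuration are not stated in the commit, so attributing this to isort is an assumption):

# Before: a first-party module sits inside the third-party block.
import datasets
from ltp_core.datamodules.utils.collate import collate
from pytorch_lightning import LightningDataModule

# After: standard library and third-party packages first,
# then first-party modules in their own block.
import datasets
from pytorch_lightning import LightningDataModule

from ltp_core.datamodules.utils.collate import collate

An isort profile with something like known_first_party = ["ltp_core", "tests"] would produce exactly this layout; that setting is an illustration, not something recorded in the commit.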
@@ -1,4 +1,5 @@
 import numpy
+
 from ltp_core.datamodules.components.srl import Srl
 from ltp_core.datamodules.utils.datasets import load_dataset
1 change: 1 addition & 0 deletions python/core/ltp_core/datamodules/components/bio.py
@@ -8,6 +8,7 @@
 from os.path import join

 import datasets
+
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder
1 change: 1 addition & 0 deletions python/core/ltp_core/datamodules/components/conllu.py
@@ -9,6 +9,7 @@
 from os.path import join

 import datasets
+
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder
1 change: 1 addition & 0 deletions python/core/ltp_core/datamodules/components/srl.py
@@ -8,6 +8,7 @@
 from os.path import join

 import datasets
+
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder
5 changes: 3 additions & 2 deletions python/core/ltp_core/datamodules/multi_task_datamodule.py
@@ -1,12 +1,13 @@
 from typing import Any, Dict, Optional

 import datasets
-from ltp_core.datamodules.utils.collate import collate
-from ltp_core.datamodules.utils.multitask_dataloader import MultiTaskDataloader
 from pytorch_lightning import LightningDataModule
 from torch.utils.data import DataLoader, Dataset
 from transformers import AutoTokenizer

+from ltp_core.datamodules.utils.collate import collate
+from ltp_core.datamodules.utils.multitask_dataloader import MultiTaskDataloader
+

 class MultiTaskDataModule(LightningDataModule):
     """LightningDataModule for LTP datasets.
3 changes: 2 additions & 1 deletion python/core/ltp_core/datamodules/task_datamodule.py
@@ -1,11 +1,12 @@
 from typing import Any, Dict, Optional

 import datasets
-from ltp_core.datamodules.utils.collate import collate
 from pytorch_lightning import LightningDataModule
 from torch.utils.data import DataLoader, Dataset
 from transformers import AutoTokenizer

+from ltp_core.datamodules.utils.collate import collate
+

 class TaskDataModule(LightningDataModule):
     """LightningDataModule for LTP datasets.
3 changes: 2 additions & 1 deletion python/core/ltp_core/eval.py
@@ -33,11 +33,12 @@
 from typing import List, Tuple

 import hydra
-from ltp_core import utils
 from omegaconf import DictConfig
 from pytorch_lightning import LightningDataModule, LightningModule, Trainer
 from pytorch_lightning.loggers import LightningLoggerBase

+from ltp_core import utils
+
 log = utils.get_pylogger(__name__)
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/components/graph.py
@@ -2,9 +2,10 @@
 # Author: Yunlong Feng <[email protected]>
 from collections import namedtuple

+from torch import nn
+
 from ltp_core.models.nn.biaffine import Biaffine
 from ltp_core.models.nn.mlp import MLP
-from torch import nn

 GraphResult = namedtuple("GraphResult", ["arc_logits", "rel_logits", "attention_mask"])
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/components/sent.py
@@ -1,8 +1,9 @@
 from collections import namedtuple

-from ltp_core.models.nn.mlp import MLP
 from torch import nn

+from ltp_core.models.nn.mlp import MLP
+
 SentClassifierResult = namedtuple("SentClassifierResult", ["logits"])
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/components/token.py
@@ -1,11 +1,12 @@
 from collections import namedtuple
 from typing import Optional

+from torch import nn
+
 from ltp_core.models.nn.biaffine import Biaffine
 from ltp_core.models.nn.crf import CRF
 from ltp_core.models.nn.mlp import MLP
 from ltp_core.models.nn.relative_transformer import RelativeTransformer
-from torch import nn

 TokenClassifierResult = namedtuple("TokenClassifierResult", ["logits", "attention_mask", "crf"])
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/criterion/graph.py
@@ -1,8 +1,9 @@
 import torch
-from ltp_core.models.components.graph import GraphResult
 from torch import Tensor
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, Module

+from ltp_core.models.components.graph import GraphResult
+

 class DEPLoss(Module):
     def __init__(self, loss_interpolation=0.4):
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/criterion/sent.py
@@ -1,7 +1,8 @@
-from ltp_core.models.components.sent import SentClassifierResult
 from torch import Tensor
 from torch.nn import CrossEntropyLoss, Module

+from ltp_core.models.components.sent import SentClassifierResult
+

 class ClassificationLoss(Module):
     def forward(self, result: SentClassifierResult, labels: Tensor, **kwargs) -> Tensor:
5 changes: 3 additions & 2 deletions python/core/ltp_core/models/criterion/token.py
@@ -1,11 +1,12 @@
 from typing import Callable

 import torch
-from ltp_core.models.components.token import TokenClassifierResult
-from ltp_core.models.functional.distill import kd_ce_loss, kd_mse_loss
 from torch import Tensor
 from torch.nn import CrossEntropyLoss, Module

+from ltp_core.models.components.token import TokenClassifierResult
+from ltp_core.models.functional.distill import kd_ce_loss, kd_mse_loss
+

 class TokenLoss(Module):
     def forward(self, result: TokenClassifierResult, labels: Tensor, *args, **kwargs) -> Tensor:
5 changes: 3 additions & 2 deletions python/core/ltp_core/models/lit_model.py
@@ -2,12 +2,13 @@
 from typing import Any, List

 import torch
-from ltp_core.models.ltp_model import LTPModule
-from ltp_core.models.utils import instantiate_omega as instantiate
 from pytorch_lightning import LightningModule
 from torch.nn import ModuleDict
 from torchmetrics import MeanMetric, MetricCollection

+from ltp_core.models.ltp_model import LTPModule
+from ltp_core.models.utils import instantiate_omega as instantiate
+

 class LTPLitModule(LightningModule):
     """LightningModule for ltp_core.
5 changes: 3 additions & 2 deletions python/core/ltp_core/models/metrics/graph.py
@@ -1,11 +1,12 @@
 from typing import Any, Optional

 import torch
-from ltp_core.algorithms import eisner
-from ltp_core.models.components.graph import GraphResult
 from torch import Tensor, tensor
 from torchmetrics import Metric

+from ltp_core.algorithms import eisner
+from ltp_core.models.components.graph import GraphResult
+

 class DEPLas(Metric):
     is_differentiable: bool = False
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/metrics/sent.py
@@ -1,9 +1,10 @@
 from typing import Optional

-from ltp_core.models.components.sent import SentClassifierResult
 from torch import Tensor
 from torchmetrics import Accuracy

+from ltp_core.models.components.sent import SentClassifierResult
+

 class ClsAccuracy(Accuracy):
     is_differentiable: bool = False
5 changes: 3 additions & 2 deletions python/core/ltp_core/models/metrics/token.py
@@ -1,11 +1,12 @@
 from typing import Any, List, Optional, Union

 import torch
-from ltp_core.algorithms import get_entities
-from ltp_core.models.components.token import TokenClassifierResult
 from torch import Tensor, tensor
 from torchmetrics import Accuracy, Metric

+from ltp_core.algorithms import get_entities
+from ltp_core.models.components.token import TokenClassifierResult
+

 class TokenAccuracy(Accuracy):
     is_differentiable: bool = False
3 changes: 2 additions & 1 deletion python/core/ltp_core/models/nn/relative_transformer.py
@@ -6,9 +6,10 @@

 import torch
 import torch.nn.functional as F
-from ltp_core.models.nn.mlp import MLP
 from torch import Tensor, nn

+from ltp_core.models.nn.mlp import MLP
+

 class RelativeEmbedding(nn.Module):
     """This module produces sinusoidal positional embeddings of any length.
3 changes: 2 additions & 1 deletion python/core/ltp_core/train.py
@@ -34,11 +34,12 @@

 import hydra
 import pytorch_lightning as pl
-from ltp_core import utils
 from omegaconf import DictConfig
 from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
 from pytorch_lightning.loggers import LightningLoggerBase

+from ltp_core import utils
+
 log = utils.get_pylogger(__name__)
3 changes: 2 additions & 1 deletion python/core/ltp_core/utils/rich_utils.py
@@ -5,11 +5,12 @@
 import rich.syntax
 import rich.tree
 from hydra.core.hydra_config import HydraConfig
-from ltp_core.utils import pylogger
 from omegaconf import DictConfig, OmegaConf, open_dict
 from pytorch_lightning.utilities import rank_zero_only
 from rich.prompt import Prompt

+from ltp_core.utils import pylogger
+
 log = pylogger.get_pylogger(__name__)
3 changes: 2 additions & 1 deletion python/core/ltp_core/utils/utils.py
@@ -5,12 +5,13 @@
 from typing import Any, Callable, Dict, List

 import hydra
-from ltp_core.utils import pylogger, rich_utils
 from omegaconf import DictConfig
 from pytorch_lightning import Callback
 from pytorch_lightning.loggers import LightningLoggerBase
 from pytorch_lightning.utilities import rank_zero_only

+from ltp_core.utils import pylogger, rich_utils
+
 log = pylogger.get_pylogger(__name__)
1 change: 1 addition & 0 deletions python/core/tests/helpers/run_if.py
@@ -10,6 +10,7 @@
 import torch
 from packaging.version import Version
 from pkg_resources import get_distribution
+
 from tests.helpers.package_available import (
     _COMET_AVAILABLE,
     _DEEPSPEED_AVAILABLE,
1 change: 1 addition & 0 deletions python/core/tests/helpers/run_sh_command.py
@@ -1,6 +1,7 @@
 from typing import List

 import pytest
+
 from tests.helpers.package_available import _SH_AVAILABLE

 if _SH_AVAILABLE:
3 changes: 2 additions & 1 deletion python/core/tests/test_eval.py
@@ -2,9 +2,10 @@

 import pytest
 from hydra.core.hydra_config import HydraConfig
+from omegaconf import open_dict
+
 from ltp_core.eval import evaluate
 from ltp_core.train import train
-from omegaconf import open_dict


 @pytest.mark.slow
1 change: 1 addition & 0 deletions python/core/tests/test_sweeps.py
@@ -1,4 +1,5 @@
 import pytest
+
 from tests.helpers.run_if import RunIf
 from tests.helpers.run_sh_command import run_sh_command
3 changes: 2 additions & 1 deletion python/core/tests/test_train.py
@@ -2,8 +2,9 @@

 import pytest
 from hydra.core.hydra_config import HydraConfig
-from ltp_core.train import train
 from omegaconf import open_dict
+
+from ltp_core.train import train
 from tests.helpers.run_if import RunIf

9 changes: 1 addition & 8 deletions python/extension/examples/legacy_train.py
@@ -1,11 +1,4 @@
-from ltp_extension.perceptron import (
-    Algorithm,
-    CWSModel,
-    CWSTrainer,
-    Model,
-    ModelType,
-    Trainer,
-)
+from ltp_extension.perceptron import Algorithm, CWSModel, CWSTrainer, Model, ModelType, Trainer


 def train_cws():
32 changes: 32 additions & 0 deletions python/interface/examples/issues.py
@@ -0,0 +1,32 @@
+from ltp import LTP
+
+
+def issue590():
+    ltp = LTP("LTP/tiny")
+    ltp.add_words(words=["[ENT]"])
+    print(ltp.pipeline(["[ENT] Info"], tasks=["cws"]))
+
+    ltp.add_words(words=["[EOS]"])
+    print(ltp.pipeline(["[EOS] Info"], tasks=["cws"]))
+
+
+def issue592():
+    legacy_ltp = LTP("LTP/legacy")
+
+    legacy_ltp.add_words(words=["SCSG", "IP地址"])
+    print(legacy_ltp.pipeline(["SCSGIP地址"], tasks=["cws"]))
+
+    neural_ltp = LTP("LTP/tiny")
+
+    # not a bug, but it does not work because of the bert tokenizer
+    neural_ltp.add_words(words=["SCSG", "IP地址"])
+    print(neural_ltp.pipeline(["SCSGIP地址"], tasks=["cws"]))
+
+
+def main():
+    issue590()
+    issue592()
+
+
+if __name__ == "__main__":
+    main()
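The issue592 case above turns on how add_words interacts with each backend: the legacy (perceptron) model applies user-added words directly during segmentation, while the neural model first runs the input through a BERT-style tokenizer, so user words that cut across wordpiece boundaries may not take effect (the "not a bug" comment in the file). A minimal check of the legacy behavior, as a sketch only: the expected split is an assumption based on the intent of the example, and result.cws mirrors the result.sdp access pattern used in simple.py below.

from ltp import LTP

ltp = LTP("LTP/legacy")
ltp.add_words(words=["SCSG", "IP地址"])

# pipeline() returns a result whose cws field holds one word list per input sentence
result = ltp.pipeline(["SCSGIP地址"], tasks=["cws"])
print(result.cws)  # expected: [["SCSG", "IP地址"]] once both user words are registered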
29 changes: 2 additions & 27 deletions python/interface/examples/simple.py
@@ -1,5 +1,4 @@
-import torch

 from ltp import LTP

@@ -48,33 +47,9 @@ def neural():
     print(result.sdp)


-def issue590():
-    ltp = LTP("LTP/tiny")
-    ltp.add_words(words=["[ENT]"])
-    print(ltp.pipeline(["[ENT] Info"], tasks=["cws"]))
-
-    ltp.add_words(words=["[EOS]"])
-    print(ltp.pipeline(["[EOS] Info"], tasks=["cws"]))
-
-
-def issue592():
-    legacy_ltp = LTP("LTP/legacy")
-
-    legacy_ltp.add_words(words=['SCSG', 'IP地址'])
-    print(legacy_ltp.pipeline(['SCSGIP地址'], tasks=["cws"]))
-
-    neural_ltp = LTP("LTP/tiny")
-
-    # not bug, but not work because of the bert tokenizer
-    neural_ltp.add_words(words=['SCSG', 'IP地址'])
-    print(neural_ltp.pipeline(['SCSGIP地址'], tasks=["cws"]))
-
-
 def main():
-    # legacy()
-    # neural()
-    # issue590()
-    issue592()
+    legacy()
+    neural()


 if __name__ == "__main__":