Commit

🎨 update pre-commit
AlongWY committed Sep 8, 2022
1 parent 785c232 commit 9b03ad8
Showing 40 changed files with 58 additions and 77 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -97,8 +97,8 @@ repos:
     hooks:
       - id: codespell
         args:
-          - --skip=logs/**,data/**,*.ipynb
-          # - --ignore-words-list=abc,def
+          - --skip=logs/**,data/**,*.ipynb,rust/**,python/extension/**,python/core/data/**,python/interface/docs/**
+          - --ignore-words-list=nd,hist

   # jupyter notebook cell output clearing
   - repo: https://github.com/kynan/nbstripout
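For reference, a minimal sketch of how the updated arguments behave when codespell is run directly rather than through pre-commit (the flags mirror the hook's args above; the standalone invocation itself is an illustration, not part of this commit):

import subprocess

# Same skip globs and ignore list as the hook; codespell exits non-zero
# when it finds misspellings, so check=False avoids raising on findings.
subprocess.run(
    [
        "codespell",
        "--skip=logs/**,data/**,*.ipynb,rust/**,python/extension/**,python/core/data/**,python/interface/docs/**",
        "--ignore-words-list=nd,hist",
    ],
    check=False,
)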
Empty file modified python/core/MANIFEST.in
100755 → 100644
Empty file.
24 changes: 12 additions & 12 deletions python/core/ltp_core/datamodules/adapters/segmention.py
@@ -1,35 +1,35 @@
 from ltp_core.datamodules.components.conllu import Conllu
 from ltp_core.datamodules.utils.datasets import load_dataset

-B = 0
-I = 1
-M = 1
-E = 2
-S = 3
+PREFIX_B = 0
+PREFIX_I = 1
+PREFIX_M = 1
+PREFIX_E = 2
+PREFIX_S = 3


 def length2bi(length):
     if length == 0:
         return []
     elif length == 1:
-        return [B]
+        return [PREFIX_B]
     elif length == 2:
-        return [B, I]
+        return [PREFIX_B, PREFIX_I]
     else:
-        return [B] + [I] * (length - 1)
+        return [PREFIX_B] + [PREFIX_I] * (length - 1)


 def length2bmes(length):
     if length == 0:
         return []
     elif length == 1:
-        return [S]
+        return [PREFIX_S]
     elif length == 2:
-        return [B, E]
+        return [PREFIX_B, PREFIX_E]
     elif length == 3:
-        return [B, M, E]
+        return [PREFIX_B, PREFIX_M, PREFIX_E]
     else:
-        return [B] + [M] * (length - 2) + [E]
+        return [PREFIX_B] + [PREFIX_M] * (length - 2) + [PREFIX_E]


 def tokenize(examples, tokenizer, max_length, length2labels=length2bi):
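To make the renamed constants concrete, here is a small usage sketch (assuming the module is importable under the path shown in the file header, with python/core as the package root):

from ltp_core.datamodules.adapters.segmention import length2bi, length2bmes

# Three two-character words, e.g. a sentence segmented as 自然 / 语言 / 处理
word_lengths = [2, 2, 2]
bi = [t for n in word_lengths for t in length2bi(n)]
bmes = [t for n in word_lengths for t in length2bmes(n)]
print(bi)    # [0, 1, 0, 1, 0, 1] -> B I B I B I
print(bmes)  # [0, 2, 0, 2, 0, 2] -> B E B E B E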
@@ -1,5 +1,4 @@
 import numpy
-
 from ltp_core.datamodules.components.srl import Srl
 from ltp_core.datamodules.utils.datasets import load_dataset

1 change: 0 additions & 1 deletion python/core/ltp_core/datamodules/components/bio.py
@@ -8,7 +8,6 @@
 from os.path import join

 import datasets
-
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder

3 changes: 1 addition & 2 deletions python/core/ltp_core/datamodules/components/conllu.py
@@ -9,7 +9,6 @@
 from os.path import join

 import datasets
-
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder

@@ -70,7 +69,7 @@ def build_vocabs(data_dir, *files, min_freq=5):
                     for dep in values[row]
                 ]
                 counter.update(itertools.chain(*deps))
-            except:
+            except Exception as e:
                 counter.update("_")
             else:
                 counter.update(values[row])
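The bare except here was worth tightening: a bare except: also catches KeyboardInterrupt and SystemExit, so interrupting a long vocab build would be silently swallowed. An illustrative, self-contained sketch of the difference (not repository code):

def to_int(token):
    try:
        return int(token)
    except Exception:  # unlike a bare "except:", Ctrl-C still propagates
        return 0

print(to_int("42"))  # 42
print(to_int("_"))   # 0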
1 change: 0 additions & 1 deletion python/core/ltp_core/datamodules/components/srl.py
@@ -8,7 +8,6 @@
 from os.path import join

 import datasets
-
 from ltp_core.datamodules.utils.iterator import iter_blocks
 from ltp_core.datamodules.utils.vocab_helper import vocab_builder

5 changes: 2 additions & 3 deletions python/core/ltp_core/datamodules/multi_task_datamodule.py
@@ -1,13 +1,12 @@
 from typing import Any, Dict, Optional

 import datasets
+from ltp_core.datamodules.utils.collate import collate
+from ltp_core.datamodules.utils.multitask_dataloader import MultiTaskDataloader
 from pytorch_lightning import LightningDataModule
 from torch.utils.data import DataLoader, Dataset
 from transformers import AutoTokenizer

-from ltp_core.datamodules.utils.collate import collate
-from ltp_core.datamodules.utils.multitask_dataloader import MultiTaskDataloader
-

 class MultiTaskDataModule(LightningDataModule):
     """LightningDataModule for LTP datasets.
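This hunk, and the similar import shuffles in the files below, are what isort produces once ltp_core is no longer grouped as a separate first-party section. A minimal sketch with isort's Python API (assumes isort >= 5 with default settings; the repository's actual isort configuration may differ):

import isort

source = (
    "import datasets\n"
    "from pytorch_lightning import LightningDataModule\n"
    "\n"
    "from ltp_core.datamodules.utils.collate import collate\n"
)
# With no first-party override, ltp_core is treated like any other
# third-party package and merged into one alphabetized import block.
print(isort.code(source))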
3 changes: 1 addition & 2 deletions python/core/ltp_core/datamodules/task_datamodule.py
@@ -1,12 +1,11 @@
 from typing import Any, Dict, Optional

 import datasets
+from ltp_core.datamodules.utils.collate import collate
 from pytorch_lightning import LightningDataModule
 from torch.utils.data import DataLoader, Dataset
 from transformers import AutoTokenizer

-from ltp_core.datamodules.utils.collate import collate
-

 class TaskDataModule(LightningDataModule):
     """LightningDataModule for LTP datasets.
3 changes: 1 addition & 2 deletions python/core/ltp_core/eval.py
@@ -33,12 +33,11 @@
 from typing import List, Tuple

 import hydra
+from ltp_core import utils
 from omegaconf import DictConfig
 from pytorch_lightning import LightningDataModule, LightningModule, Trainer
 from pytorch_lightning.loggers import LightningLoggerBase

-from ltp_core import utils
-

 log = utils.get_pylogger(__name__)
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/components/graph.py
@@ -2,10 +2,9 @@
 # Author: Yunlong Feng <[email protected]>
 from collections import namedtuple

-from torch import nn
-
 from ltp_core.models.nn.biaffine import Biaffine
 from ltp_core.models.nn.mlp import MLP
+from torch import nn

 GraphResult = namedtuple("GraphResult", ["arc_logits", "rel_logits", "attention_mask"])
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/components/sent.py
@@ -1,8 +1,7 @@
 from collections import namedtuple

-from torch import nn
-
 from ltp_core.models.nn.mlp import MLP
+from torch import nn

 SentClassifierResult = namedtuple("SentClassifierResult", ["logits"])
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/components/token.py
@@ -1,12 +1,11 @@
 from collections import namedtuple
 from typing import Optional

-from torch import nn
-
 from ltp_core.models.nn.biaffine import Biaffine
 from ltp_core.models.nn.crf import CRF
 from ltp_core.models.nn.mlp import MLP
 from ltp_core.models.nn.relative_transformer import RelativeTransformer
+from torch import nn

 TokenClassifierResult = namedtuple("TokenClassifierResult", ["logits", "attention_mask", "crf"])
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/criterion/graph.py
@@ -1,9 +1,8 @@
 import torch
+from ltp_core.models.components.graph import GraphResult
 from torch import Tensor
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, Module

-from ltp_core.models.components.graph import GraphResult
-

 class DEPLoss(Module):
     def __init__(self, loss_interpolation=0.4):
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/criterion/sent.py
@@ -1,8 +1,7 @@
+from ltp_core.models.components.sent import SentClassifierResult
 from torch import Tensor
 from torch.nn import CrossEntropyLoss, Module

-from ltp_core.models.components.sent import SentClassifierResult
-

 class ClassificationLoss(Module):
     def forward(self, result: SentClassifierResult, labels: Tensor, **kwargs) -> Tensor:
5 changes: 2 additions & 3 deletions python/core/ltp_core/models/criterion/token.py
@@ -1,11 +1,10 @@
 from typing import Callable

 import torch
-from torch import Tensor
-from torch.nn import CrossEntropyLoss, Module
-
 from ltp_core.models.components.token import TokenClassifierResult
 from ltp_core.models.functional.distill import kd_ce_loss, kd_mse_loss
+from torch import Tensor
+from torch.nn import CrossEntropyLoss, Module


 class TokenLoss(Module):
5 changes: 2 additions & 3 deletions python/core/ltp_core/models/lit_model.py
@@ -2,13 +2,12 @@
 from typing import Any, List

 import torch
+from ltp_core.models.ltp_model import LTPModule
+from ltp_core.models.utils import instantiate_omega as instantiate
 from pytorch_lightning import LightningModule
 from torch.nn import ModuleDict
 from torchmetrics import MeanMetric, MetricCollection

-from ltp_core.models.ltp_model import LTPModule
-from ltp_core.models.utils import instantiate_omega as instantiate
-

 class LTPLitModule(LightningModule):
     """LightningModule for ltp_core.
5 changes: 2 additions & 3 deletions python/core/ltp_core/models/metrics/graph.py
@@ -1,11 +1,10 @@
 from typing import Any, Optional

 import torch
-from torch import Tensor, tensor
-from torchmetrics import Metric
-
 from ltp_core.algorithms import eisner
 from ltp_core.models.components.graph import GraphResult
+from torch import Tensor, tensor
+from torchmetrics import Metric


 class DEPLas(Metric):
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/metrics/sent.py
@@ -1,10 +1,9 @@
 from typing import Optional

+from ltp_core.models.components.sent import SentClassifierResult
 from torch import Tensor
 from torchmetrics import Accuracy

-from ltp_core.models.components.sent import SentClassifierResult
-

 class ClsAccuracy(Accuracy):
     is_differentiable: bool = False
5 changes: 2 additions & 3 deletions python/core/ltp_core/models/metrics/token.py
@@ -1,11 +1,10 @@
 from typing import Any, List, Optional, Union

 import torch
-from torch import Tensor, tensor
-from torchmetrics import Accuracy, Metric
-
 from ltp_core.algorithms import get_entities
 from ltp_core.models.components.token import TokenClassifierResult
+from torch import Tensor, tensor
+from torchmetrics import Accuracy, Metric


 class TokenAccuracy(Accuracy):
3 changes: 1 addition & 2 deletions python/core/ltp_core/models/nn/relative_transformer.py
@@ -6,9 +6,8 @@

 import torch
 import torch.nn.functional as F
-from torch import Tensor, nn
-
 from ltp_core.models.nn.mlp import MLP
+from torch import Tensor, nn


 class RelativeEmbedding(nn.Module):
2 changes: 1 addition & 1 deletion python/core/ltp_core/models/optimization/layer_lrs.py
@@ -28,7 +28,7 @@ def get_layer_lrs_with_crf(
         elif m.group(1) == "encoder":
             depth = int(m.group(3)) + 1
         else:
-            raise Exception("Not Recommand!!!")
+            raise Exception("Not Recommend!!!")

         if is_transformer and any(
             x in name for x in ["bias", "LayerNorm.bias", "LayerNorm.weight"]
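For context, the depth computed in this hunk typically feeds a layer-wise learning-rate decay schedule. A self-contained illustration of that idea (the function name and formula are an assumption for exposition, not this repository's exact scheme):

def layer_lr(base_lr, decay, depth, n_layers):
    # layers closer to the output (larger depth) keep a larger learning rate
    return base_lr * decay ** (n_layers - depth)

print(layer_lr(1e-3, 0.8, depth=1, n_layers=12))   # small LR near the embeddings
print(layer_lr(1e-3, 0.8, depth=12, n_layers=12))  # full LR at the top layer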
3 changes: 1 addition & 2 deletions python/core/ltp_core/train.py
@@ -34,12 +34,11 @@

 import hydra
 import pytorch_lightning as pl
+from ltp_core import utils
 from omegaconf import DictConfig
 from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
 from pytorch_lightning.loggers import LightningLoggerBase

-from ltp_core import utils
-

 log = utils.get_pylogger(__name__)
3 changes: 1 addition & 2 deletions python/core/ltp_core/utils/rich_utils.py
@@ -5,12 +5,11 @@
 import rich.syntax
 import rich.tree
 from hydra.core.hydra_config import HydraConfig
+from ltp_core.utils import pylogger
 from omegaconf import DictConfig, OmegaConf, open_dict
 from pytorch_lightning.utilities import rank_zero_only
 from rich.prompt import Prompt

-from ltp_core.utils import pylogger
-

 log = pylogger.get_pylogger(__name__)
3 changes: 1 addition & 2 deletions python/core/ltp_core/utils/utils.py
@@ -5,13 +5,12 @@
 from typing import Any, Callable, Dict, List

 import hydra
+from ltp_core.utils import pylogger, rich_utils
 from omegaconf import DictConfig
 from pytorch_lightning import Callback
 from pytorch_lightning.loggers import LightningLoggerBase
 from pytorch_lightning.utilities import rank_zero_only

-from ltp_core.utils import pylogger, rich_utils
-

 log = pylogger.get_pylogger(__name__)
1 change: 0 additions & 1 deletion python/core/tests/helpers/run_if.py
@@ -10,7 +10,6 @@
 import torch
 from packaging.version import Version
 from pkg_resources import get_distribution
-
 from tests.helpers.package_available import (
     _COMET_AVAILABLE,
     _DEEPSPEED_AVAILABLE,
1 change: 0 additions & 1 deletion python/core/tests/helpers/run_sh_command.py
@@ -1,7 +1,6 @@
 from typing import List

 import pytest
-
 from tests.helpers.package_available import _SH_AVAILABLE

 if _SH_AVAILABLE:
3 changes: 1 addition & 2 deletions python/core/tests/test_eval.py
@@ -2,10 +2,9 @@

 import pytest
 from hydra.core.hydra_config import HydraConfig
-from omegaconf import open_dict
-
 from ltp_core.eval import evaluate
 from ltp_core.train import train
+from omegaconf import open_dict


 @pytest.mark.slow
1 change: 0 additions & 1 deletion python/core/tests/test_sweeps.py
@@ -1,5 +1,4 @@
 import pytest
-
 from tests.helpers.run_if import RunIf
 from tests.helpers.run_sh_command import run_sh_command

3 changes: 1 addition & 2 deletions python/core/tests/test_train.py
@@ -2,9 +2,8 @@

 import pytest
 from hydra.core.hydra_config import HydraConfig
-from omegaconf import open_dict
-
 from ltp_core.train import train
+from omegaconf import open_dict
 from tests.helpers.run_if import RunIf

9 changes: 8 additions & 1 deletion python/extension/examples/legacy_train.py
@@ -1,4 +1,11 @@
-from ltp_extension.perceptron import Algorithm, CWSModel, CWSTrainer, Model, ModelType, Trainer
+from ltp_extension.perceptron import (
+    Algorithm,
+    CWSModel,
+    CWSTrainer,
+    Model,
+    ModelType,
+    Trainer,
+)


 def train_cws():
Empty file modified python/interface/MANIFEST.in
100755 → 100644
Empty file.
Empty file modified python/interface/docs/introduction.rst
100755 → 100644
Empty file.
Empty file modified python/interface/docs/performance.rst
100755 → 100644
Empty file.
Empty file modified python/interface/docs/quickstart.rst
100755 → 100644
Empty file.
1 change: 1 addition & 0 deletions python/interface/examples/simple.py
@@ -1,4 +1,5 @@
 import torch
+
 from ltp import LTP

Empty file modified python/interface/ltp/__init__.py
100755 → 100644
Empty file.
