Skip to content

Commit

Permalink
0.20.0 release (guillaume-be#327)
Browse files Browse the repository at this point in the history
* Relax dependencies, fix doctests

* Bump tokio for future rust compat, fix tests

* Updated changelog
  • Loading branch information
guillaume-be authored Jan 22, 2023
1 parent 0fc5ce6 commit f1b8409
Show file tree
Hide file tree
Showing 14 changed files with 35 additions and 34 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ All notable changes to this project will be documented in this file. The format

## [Unreleased]

## [0.20.0] - 2023-01-21
## Added
- Addition of All-MiniLM-L6-V2 model weights
- Addition of Keyword/Keyphrases extraction pipeline based on KeyBERT (https://github.com/MaartenGr/KeyBERT)
Expand All @@ -21,7 +22,7 @@ All notable changes to this project will be documented in this file. The format
- Fixed configuration check for RoBERTa models for sentence classification.
- Fixed a bug causing the input prompt to be truncated for text generation if the prompt length was longer than `max_length`

## [0.18.0] - 2022-07-24
## [0.19.0] - 2022-07-24
## Added
- Support for sentence embeddings models and pipelines, based on [SentenceTransformers](https://www.sbert.net).

Expand Down
40 changes: 20 additions & 20 deletions Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
[package]
name = "rust-bert"
version = "0.19.0"
version = "0.20.0"
authors = ["Guillaume Becquin <[email protected]>"]
edition = "2018"
description = "Ready-to-use NLP pipelines and transformer-based models (BERT, DistilBERT, GPT2,...)"
description = "Ready-to-use NLP pipelines and language models"
repository = "https://github.com/guillaume-be/rust-bert"
documentation = "https://docs.rs/rust-bert"
license = "Apache-2.0"
Expand Down Expand Up @@ -71,23 +71,23 @@ features = ["doc-only"]
[dependencies]
rust_tokenizers = "~7.0.2"
tch = "~0.10.1"
serde_json = "1.0.82"
serde = { version = "1.0.140", features = ["derive"] }
ordered-float = "3.0.0"
uuid = { version = "1.1.2", features = ["v4"] }
thiserror = "1.0.31"
half = "2.1.0"
regex = "1.6.0"

cached-path = { version = "0.6.0", optional = true }
dirs = { version = "4.0.0", optional = true }
lazy_static = { version = "1.4.0", optional = true }
serde_json = "1"
serde = { version = "1", features = ["derive"] }
ordered-float = "3"
uuid = { version = "1", features = ["v4"] }
thiserror = "1"
half = "2"
regex = "1.6"

cached-path = { version = "0.6", optional = true }
dirs = { version = "4", optional = true }
lazy_static = { version = "1", optional = true }

[dev-dependencies]
anyhow = "1.0.58"
csv = "1.1.6"
criterion = "0.3.6"
tokio = { version = "1.20.0", features = ["sync", "rt-multi-thread", "macros"] }
torch-sys = "0.10.0"
tempfile = "3.3.0"
itertools = "0.10.3"
anyhow = "1"
csv = "1"
criterion = "0.4"
tokio = { version = "1.24", features = ["sync", "rt-multi-thread", "macros"] }
torch-sys = "=0.10.0"
tempfile = "3"
itertools = "0.10"
2 changes: 1 addition & 1 deletion src/albert/albert_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,7 @@ impl AlbertForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = AlbertConfig::from_file(config_path);
/// # let albert_model: AlbertForSequenceClassification = AlbertForSequenceClassification::new(&vs.root(), &config);
/// # let albert_model: AlbertForSequenceClassification = AlbertForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/bart/bart_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -829,7 +829,7 @@ impl BartForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BartConfig::from_file(config_path);
/// # let bart_model: BartForSequenceClassification = BartForSequenceClassification::new(&vs.root(), &config);
/// # let bart_model: BartForSequenceClassification = BartForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
/// let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device));
/// let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/bert/bert_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -747,7 +747,7 @@ impl BertForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BertConfig::from_file(config_path);
/// # let bert_model = BertForSequenceClassification::new(&vs.root(), &config);
/// # let bert_model = BertForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Kind::Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Kind::Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/deberta/deberta_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -805,7 +805,7 @@ impl DebertaForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = DebertaConfig::from_file(config_path);
/// # let model = DebertaForSequenceClassification::new(&vs.root(), &config);
/// # let model = DebertaForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Kind::Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Kind::Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/deberta_v2/deberta_v2_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -667,7 +667,7 @@ impl DebertaV2ForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = DebertaV2Config::from_file(config_path);
/// # let model = DebertaV2ForSequenceClassification::new(&vs.root(), &config);
/// # let model = DebertaV2ForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Kind::Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Kind::Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/distilbert/distilbert_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,7 @@ impl DistilBertModelClassifier {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = DistilBertConfig::from_file(config_path);
/// # let distilbert_model: DistilBertModelClassifier = DistilBertModelClassifier::new(&vs.root(), &config);
/// # let distilbert_model: DistilBertModelClassifier = DistilBertModelClassifier::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down
4 changes: 2 additions & 2 deletions src/fnet/fnet_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -563,7 +563,7 @@ impl FNetForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = FNetConfig::from_file(config_path);
/// let model = FNetForSequenceClassification::new(&vs.root(), &config);
/// let model = FNetForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let token_type_ids = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down Expand Up @@ -972,7 +972,7 @@ impl FNetForQuestionAnswering {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = FNetConfig::from_file(config_path);
/// let model = FNetForQuestionAnswering::new(&vs.root(), &config).unwrap();
/// let model = FNetForQuestionAnswering::new(&vs.root(), &config);
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let token_type_ids = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/longformer/longformer_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -925,7 +925,7 @@ impl LongformerForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = LongformerConfig::from_file(config_path);
/// let longformer_model = LongformerForSequenceClassification::new(&vs.root(), &config);
/// let longformer_model = LongformerForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length, target_sequence_length) = (64, 128, 32);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let attention_mask = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/mbart/mbart_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -652,7 +652,7 @@ impl MBartForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = MBartConfig::from_file(config_path);
/// # let mbart_model: MBartForSequenceClassification = MBartForSequenceClassification::new(&vs.root(), &config);
/// # let mbart_model: MBartForSequenceClassification = MBartForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
/// let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device));
/// let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/mobilebert/mobilebert_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -755,7 +755,7 @@ impl MobileBertForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = MobileBertConfig::from_file(config_path);
/// let model = MobileBertForSequenceClassification::new(&vs.root(), &config);
/// let model = MobileBertForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down
2 changes: 1 addition & 1 deletion src/pipelines/masked_language.rs
Original file line number Diff line number Diff line change
Expand Up @@ -491,7 +491,7 @@ impl MaskedLanguageModel {
/// ];
///
/// // Run model
/// let output = mask_language_model.predict(&input);
/// let output = mask_language_model.predict(&input)?;
/// for word in output {
/// println!("{:?}", word);
/// }
Expand Down
2 changes: 1 addition & 1 deletion src/roberta/roberta_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -549,7 +549,7 @@ impl RobertaForSequenceClassification {
/// # let device = Device::Cpu;
/// # let vs = nn::VarStore::new(device);
/// # let config = BertConfig::from_file(config_path);
/// # let roberta_model = RobertaForSequenceClassification::new(&vs.root(), &config);
/// # let roberta_model = RobertaForSequenceClassification::new(&vs.root(), &config).unwrap();
/// let (batch_size, sequence_length) = (64, 128);
/// let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
/// let mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
Expand Down

0 comments on commit f1b8409

Please sign in to comment.