Commit bb177d4

Update the rustfmt config.
LaurentMazare committed Jan 2, 2022
1 parent e75e1bb commit bb177d4
Showing 71 changed files with 2,982 additions and 10,149 deletions.
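
The reformatting visible in every hunk below (multi-line struct literals, argument lists, and method chains collapsed onto a single line whenever they fit within a 100-column limit) is what rustfmt produces with a configuration along the following lines. This is a sketch inferred from the resulting style, not necessarily the exact rustfmt.toml added in this commit:

    max_width = 100
    use_small_heuristics = "Max"

With use_small_heuristics = "Max", rustfmt packs struct literals, call arguments, and chained method calls onto one line whenever they fit under max_width, which is why initializers such as nn::ConvConfig and nn::Sgd in the hunks below become one-liners. Running cargo fmt --all applies the configuration across the workspace, and cargo fmt --all -- --check only verifies it.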
19 changes: 4 additions & 15 deletions examples/cifar/main.rs
@@ -11,11 +11,7 @@ use tch::nn::{FuncT, ModuleT, OptimizerConfig, SequentialT};
 use tch::{nn, Device};

 fn conv_bn(vs: &nn::Path, c_in: i64, c_out: i64) -> SequentialT {
-    let conv2d_cfg = nn::ConvConfig {
-        padding: 1,
-        bias: false,
-        ..Default::default()
-    };
+    let conv2d_cfg = nn::ConvConfig { padding: 1, bias: false, ..Default::default() };
     nn::seq_t()
         .add(nn::conv2d(vs, c_in, c_out, 3, conv2d_cfg))
         .add(nn::batch_norm2d(vs, c_out, Default::default()))
@@ -59,20 +55,13 @@ pub fn main() -> Result<()> {
     let m = tch::vision::cifar::load_dir("data")?;
     let vs = nn::VarStore::new(Device::cuda_if_available());
     let net = fast_resnet(&vs.root());
-    let mut opt = nn::Sgd {
-        momentum: 0.9,
-        dampening: 0.,
-        wd: 5e-4,
-        nesterov: true,
-    }
-    .build(&vs, 0.)?;
+    let mut opt =
+        nn::Sgd { momentum: 0.9, dampening: 0., wd: 5e-4, nesterov: true }.build(&vs, 0.)?;
     for epoch in 1..150 {
         opt.set_lr(learning_rate(epoch));
         for (bimages, blabels) in m.train_iter(64).shuffle().to_device(vs.device()) {
             let bimages = tch::vision::dataset::augmentation(&bimages, true, 4, 8);
-            let loss = net
-                .forward_t(&bimages, true)
-                .cross_entropy_for_logits(&blabels);
+            let loss = net.forward_t(&bimages, true).cross_entropy_for_logits(&blabels);
             opt.backward_step(&loss);
         }
         let test_accuracy =
15 changes: 3 additions & 12 deletions examples/custom-optimizer/main.rs
@@ -11,12 +11,7 @@ const LABELS: i64 = 10;

 fn net(vs: &nn::Path) -> impl Module {
     nn::seq()
-        .add(nn::linear(
-            vs / "layer1",
-            IMAGE_DIM,
-            HIDDEN_NODES,
-            Default::default(),
-        ))
+        .add(nn::linear(vs / "layer1", IMAGE_DIM, HIDDEN_NODES, Default::default()))
         .add_fn(|xs| xs.relu())
         .add(nn::linear(vs, HIDDEN_NODES, LABELS, Default::default()))
 }
@@ -32,18 +27,14 @@ pub fn run() -> Result<()> {
     let mut opt = sparse_adam::SparseAdam::new(&vs, 5e-3, 0.9, 0.999, 1e-8, force_sparse);

     for epoch in 1..200 {
-        let loss = net
-            .forward(&m.train_images)
-            .cross_entropy_for_logits(&m.train_labels);
+        let loss = net.forward(&m.train_images).cross_entropy_for_logits(&m.train_labels);

         // call custom optimizer
         opt.zero_grad();
         loss.backward();
         opt.step();

-        let test_accuracy = net
-            .forward(&m.test_images)
-            .accuracy_for_logits(&m.test_labels);
+        let test_accuracy = net.forward(&m.test_images).accuracy_for_logits(&m.test_labels);
         println!(
             "epoch: {:4} train loss: {:8.5} test acc: {:5.2}%",
             epoch,
18 changes: 3 additions & 15 deletions examples/custom-optimizer/sparse_adam.rs
@@ -62,15 +62,7 @@ impl SparseAdam {
             .map(|x| Buffer::new(&x.tensor.size()))
             .collect();

-        SparseAdam {
-            lr,
-            beta1,
-            beta2,
-            eps,
-            force_sparse,
-            vars,
-            buffers,
-        }
+        SparseAdam { lr, beta1, beta2, eps, force_sparse, vars, buffers }
     }

     /// Ensure that the gradient update is not part of the autograd routine
@@ -113,12 +105,8 @@ impl SparseAdam {
                 let update_second_moment = (1.0 - self.beta2)
                     * (&values * &values - buffer.second_moment.index_select(0, &indices));

-                let _ = buffer
-                    .first_moment
-                    .index_add_(0, &indices, &update_first_moment);
-                let _ = buffer
-                    .second_moment
-                    .index_add_(0, &indices, &update_second_moment);
+                let _ = buffer.first_moment.index_add_(0, &indices, &update_first_moment);
+                let _ = buffer.second_moment.index_add_(0, &indices, &update_second_moment);

                 // first part of update step -lr * m_t / (1-b_1^t)
                 let part1 =
32 changes: 5 additions & 27 deletions examples/gan/main.rs
@@ -12,22 +12,12 @@ const LEARNING_RATE: f64 = 1e-4;
 const BATCHES: i64 = 100000000;

 fn tr2d(p: nn::Path, c_in: i64, c_out: i64, padding: i64, stride: i64) -> nn::ConvTranspose2D {
-    let cfg = nn::ConvTransposeConfig {
-        stride,
-        padding,
-        bias: false,
-        ..Default::default()
-    };
+    let cfg = nn::ConvTransposeConfig { stride, padding, bias: false, ..Default::default() };
     nn::conv_transpose2d(&p, c_in, c_out, 4, cfg)
 }

 fn conv2d(p: nn::Path, c_in: i64, c_out: i64, padding: i64, stride: i64) -> nn::Conv2D {
-    let cfg = nn::ConvConfig {
-        stride,
-        padding,
-        bias: false,
-        ..Default::default()
-    };
+    let cfg = nn::ConvConfig { stride, padding, bias: false, ..Default::default() };
     nn::conv2d(&p, c_in, c_out, 4, cfg)
 }

@@ -79,12 +69,7 @@ fn image_matrix(imgs: &Tensor, sz: i64) -> Result<Tensor> {
     let imgs = ((imgs + 1.) * 127.5).clamp(0., 255.).to_kind(Kind::Uint8);
     let mut ys: Vec<Tensor> = vec![];
     for i in 0..sz {
-        ys.push(Tensor::cat(
-            &(0..sz)
-                .map(|j| imgs.narrow(0, 4 * i + j, 1))
-                .collect::<Vec<_>>(),
-            2,
-        ))
+        ys.push(Tensor::cat(&(0..sz).map(|j| imgs.narrow(0, 4 * i + j, 1)).collect::<Vec<_>>(), 2))
     }
     Ok(Tensor::cat(&ys, 3).squeeze_dim(0))
 }
@@ -102,12 +87,7 @@ pub fn main() -> Result<()> {

     let random_batch_images = || {
         let index = Tensor::randint(train_size, &[BATCH_SIZE], kind::INT64_CPU);
-        images
-            .index_select(0, &index)
-            .to_device(device)
-            .to_kind(Kind::Float)
-            / 127.5
-            - 1.
+        images.index_select(0, &index).to_device(device).to_kind(Kind::Float) / 127.5 - 1.
     };
     let rand_latent = || {
         (Tensor::rand(&[BATCH_SIZE, LATENT_DIM, 1, 1], kind::FLOAT_CPU) * 2.0 - 1.0)
@@ -146,9 +126,7 @@ pub fn main() -> Result<()> {
         let generator_loss = {
             let batch_images = random_batch_images();
             let y_pred = batch_images.apply_t(&discriminator, true);
-            let y_pred_fake = rand_latent()
-                .apply_t(&generator, true)
-                .apply_t(&discriminator, true);
+            let y_pred_fake = rand_latent().apply_t(&generator, true).apply_t(&discriminator, true);
             mse_loss(&y_pred, &(y_pred_fake.mean(Kind::Float) - 1))
                 + mse_loss(&y_pred_fake, &(y_pred.mean(Kind::Float) + 1))
         };
10 changes: 2 additions & 8 deletions examples/jit-quantized/main.rs
@@ -37,16 +37,10 @@ pub fn main() -> Result<()> {
     for _ in 1..NRUNS {
         let _output = image.unsqueeze(0).apply(&model);
     }
-    println!(
-        "Mean Inference Time: {} ms",
-        now.elapsed().unwrap().as_millis() / NRUNS as u128
-    );
+    println!("Mean Inference Time: {} ms", now.elapsed().unwrap().as_millis() / NRUNS as u128);

     // Apply the forward pass of the model to get the logits.
-    let output = image
-        .unsqueeze(0)
-        .apply(&model)
-        .softmax(-1, tch::Kind::Float);
+    let output = image.unsqueeze(0).apply(&model).softmax(-1, tch::Kind::Float);

     // Print the top 5 categories for this image.
     println!("Top 5 Predictions:");
11 changes: 2 additions & 9 deletions examples/jit-train/main.rs
@@ -18,15 +18,8 @@ fn train_and_save_model(dataset: &Dataset, device: Device) -> Result<()> {

     let mut opt = Adam::default().build(&vs, 1e-4)?;
     for epoch in 1..20 {
-        for (images, labels) in dataset
-            .train_iter(128)
-            .shuffle()
-            .to_device(vs.device())
-            .take(50)
-        {
-            let loss = trainable
-                .forward_t(&images, true)
-                .cross_entropy_for_logits(&labels);
+        for (images, labels) in dataset.train_iter(128).shuffle().to_device(vs.device()).take(50) {
+            let loss = trainable.forward_t(&images, true).cross_entropy_for_logits(&labels);
             opt.backward_step(&loss);
         }
         let test_accuracy = trainable.batch_accuracy_for_logits(
5 changes: 1 addition & 4 deletions examples/jit/main.rs
@@ -17,10 +17,7 @@ pub fn main() -> Result<()> {
     let model = tch::CModule::load(model_file)?;

     // Apply the forward pass of the model to get the logits.
-    let output = image
-        .unsqueeze(0)
-        .apply(&model)
-        .softmax(-1, tch::Kind::Float);
+    let output = image.unsqueeze(0).apply(&model).softmax(-1, tch::Kind::Float);

     // Print the top 5 categories for this image.
     for (probability, class) in imagenet::top(&output, 5).iter() {
40 changes: 8 additions & 32 deletions examples/min-gpt/main.rs
@@ -80,16 +80,9 @@ fn causal_self_attention(p: &nn::Path, cfg: Config) -> impl ModuleT {
         let q = xs.apply(&query).view(sizes).transpose(1, 2);
         let v = xs.apply(&value).view(sizes).transpose(1, 2);
         let att = q.matmul(&k.transpose(-2, -1)) * (1.0 / f64::sqrt(sizes[3] as f64));
-        let att = att.masked_fill(
-            &mask.i((.., .., ..sz_t, ..sz_t)).eq(0.),
-            std::f64::NEG_INFINITY,
-        );
+        let att = att.masked_fill(&mask.i((.., .., ..sz_t, ..sz_t)).eq(0.), std::f64::NEG_INFINITY);
         let att = att.softmax(-1, Kind::Float).dropout(cfg.attn_pdrop, train);
-        let ys = att
-            .matmul(&v)
-            .transpose(1, 2)
-            .contiguous()
-            .view([sz_b, sz_t, sz_c]);
+        let ys = att.matmul(&v).transpose(1, 2).contiguous().view([sz_b, sz_t, sz_c]);
         ys.apply(&proj).dropout(cfg.resid_pdrop, train)
     })
 }
@@ -102,24 +95,14 @@ fn block(p: &nn::Path, cfg: Config) -> impl ModuleT {
     let lin2 = linear(p / "lin2", 4 * cfg.n_embd, cfg.n_embd);
     nn::func_t(move |xs, train| {
         let xs = xs + xs.apply(&ln1).apply_t(&attn, train);
-        let ys = xs
-            .apply(&ln2)
-            .apply(&lin1)
-            .gelu()
-            .apply(&lin2)
-            .dropout(cfg.resid_pdrop, train);
+        let ys = xs.apply(&ln2).apply(&lin1).gelu().apply(&lin2).dropout(cfg.resid_pdrop, train);
         xs + ys
     })
 }

 fn gpt(p: &nn::Path, cfg: Config) -> impl ModuleT {
     let p = &p.set_group(NO_WEIGHT_DECAY_GROUP);
-    let tok_emb = nn::embedding(
-        p / "tok_emb",
-        cfg.vocab_size,
-        cfg.n_embd,
-        Default::default(),
-    );
+    let tok_emb = nn::embedding(p / "tok_emb", cfg.vocab_size, cfg.n_embd, Default::default());
     let pos_emb = p.zeros("pos_emb", &[1, cfg.block_size, cfg.n_embd]);
     let ln_f = nn::layer_norm(p / "ln_f", vec![cfg.n_embd], Default::default());
     let head = linear_no_bias(p / "head", cfg.n_embd, cfg.vocab_size);
@@ -184,14 +167,8 @@ pub fn main() -> Result<()> {
         let mut sum_loss = 0.;
         let mut cnt_loss = 0.;
         for batch in data.iter_shuffle(BLOCK_SIZE + 1, BATCH_SIZE) {
-            let xs = batch
-                .narrow(1, 0, BLOCK_SIZE)
-                .to_kind(Kind::Int64)
-                .to_device(device);
-            let ys = batch
-                .narrow(1, 1, BLOCK_SIZE)
-                .to_kind(Kind::Int64)
-                .to_device(device);
+            let xs = batch.narrow(1, 0, BLOCK_SIZE).to_kind(Kind::Int64).to_device(device);
+            let ys = batch.narrow(1, 1, BLOCK_SIZE).to_kind(Kind::Int64).to_device(device);
             let logits = xs.apply_t(&gpt, true);
             let loss = logits
                 .view([BATCH_SIZE * BLOCK_SIZE, labels])
@@ -222,9 +199,8 @@ pub fn main() -> Result<()> {
                 if idx >= BLOCK_SIZE {
                     break;
                 }
-                let _filled = input
-                    .i((0, BLOCK_SIZE - 1 - idx))
-                    .fill_(data.char_to_label(c)? as i64);
+                let _filled =
+                    input.i((0, BLOCK_SIZE - 1 - idx)).fill_(data.char_to_label(c)? as i64);
             }
             println!("Sample: {}", sample(&data, &gpt, input));
         }
6 changes: 1 addition & 5 deletions examples/mnist/main.rs
@@ -15,11 +15,7 @@ mod mnist_nn;

 fn main() -> Result<()> {
     let args: Vec<String> = std::env::args().collect();
-    let model = if args.len() < 2 {
-        None
-    } else {
-        Some(args[1].as_str())
-    };
+    let model = if args.len() < 2 { None } else { Some(args[1].as_str()) };
     match model {
         None => mnist_nn::run(),
         Some("linear") => mnist_linear::run(),
11 changes: 2 additions & 9 deletions examples/mnist/mnist_conv.rs
@@ -17,12 +17,7 @@ impl Net {
         let conv2 = nn::conv2d(vs, 32, 64, 5, Default::default());
         let fc1 = nn::linear(vs, 1024, 1024, Default::default());
         let fc2 = nn::linear(vs, 1024, 10, Default::default());
-        Net {
-            conv1,
-            conv2,
-            fc1,
-            fc2,
-        }
+        Net { conv1, conv2, fc1, fc2 }
     }
 }

@@ -48,9 +43,7 @@ pub fn run() -> Result<()> {
     let mut opt = nn::Adam::default().build(&vs, 1e-4)?;
     for epoch in 1..100 {
         for (bimages, blabels) in m.train_iter(256).shuffle().to_device(vs.device()) {
-            let loss = net
-                .forward_t(&bimages, true)
-                .cross_entropy_for_logits(&blabels);
+            let loss = net.forward_t(&bimages, true).cross_entropy_for_logits(&blabels);
             opt.backward_step(&loss);
         }
         let test_accuracy =
4 changes: 1 addition & 3 deletions examples/mnist/mnist_linear.rs
@@ -16,9 +16,7 @@ pub fn run() -> Result<()> {
     let mut bs = Tensor::zeros(&[LABELS], kind::FLOAT_CPU).set_requires_grad(true);
     for epoch in 1..200 {
         let logits = m.train_images.mm(&ws) + &bs;
-        let loss = logits
-            .log_softmax(-1, Kind::Float)
-            .nll_loss(&m.train_labels);
+        let loss = logits.log_softmax(-1, Kind::Float).nll_loss(&m.train_labels);
         ws.zero_grad();
         bs.zero_grad();
         loss.backward();
15 changes: 3 additions & 12 deletions examples/mnist/mnist_nn.rs
@@ -9,12 +9,7 @@ const LABELS: i64 = 10;

 fn net(vs: &nn::Path) -> impl Module {
     nn::seq()
-        .add(nn::linear(
-            vs / "layer1",
-            IMAGE_DIM,
-            HIDDEN_NODES,
-            Default::default(),
-        ))
+        .add(nn::linear(vs / "layer1", IMAGE_DIM, HIDDEN_NODES, Default::default()))
         .add_fn(|xs| xs.relu())
         .add(nn::linear(vs, HIDDEN_NODES, LABELS, Default::default()))
 }
@@ -25,13 +20,9 @@ pub fn run() -> Result<()> {
     let net = net(&vs.root());
     let mut opt = nn::Adam::default().build(&vs, 1e-3)?;
     for epoch in 1..200 {
-        let loss = net
-            .forward(&m.train_images)
-            .cross_entropy_for_logits(&m.train_labels);
+        let loss = net.forward(&m.train_images).cross_entropy_for_logits(&m.train_labels);
         opt.backward_step(&loss);
-        let test_accuracy = net
-            .forward(&m.test_images)
-            .accuracy_for_logits(&m.test_labels);
+        let test_accuracy = net.forward(&m.test_images).accuracy_for_logits(&m.test_labels);
         println!(
             "epoch: {:4} train loss: {:8.5} test acc: {:5.2}%",
             epoch,
10 changes: 3 additions & 7 deletions examples/neural-style-transfer/main.rs
@@ -33,9 +33,7 @@ pub fn main() -> Result<()> {

     let mut net_vs = tch::nn::VarStore::new(device);
     let net = vgg::vgg16(&net_vs.root(), imagenet::CLASS_COUNT);
-    net_vs
-        .load(&weights)
-        .expect(&format!("Could not load weights file {}", &weights));
+    net_vs.load(&weights).expect(&format!("Could not load weights file {}", &weights));
     net_vs.freeze();

     let style_img = imagenet::load_image(&style_img)
@@ -56,10 +54,8 @@ pub fn main() -> Result<()> {

     for step_idx in 1..(1 + TOTAL_STEPS) {
         let input_layers = net.forward_all_t(&input_var, false, Some(max_layer));
-        let style_loss: Tensor = STYLE_INDEXES
-            .iter()
-            .map(|&i| style_loss(&input_layers[i], &style_layers[i]))
-            .sum();
+        let style_loss: Tensor =
+            STYLE_INDEXES.iter().map(|&i| style_loss(&input_layers[i], &style_layers[i])).sum();
         let content_loss: Tensor = CONTENT_INDEXES
             .iter()
             .map(|&i| input_layers[i].mse_loss(&content_layers[i], tch::Reduction::Mean))
(Diff truncated: the remaining 58 changed files are not shown here.)
