forked from JDAI-CV/FaceX-Zoo
Commit 2f97a0e by wangjun492, committed Nov 8, 2021 (parent: 34dec6d).
Showing 9 changed files with 1,040 additions and 18 deletions.
(Large diffs are not rendered by default, so only part of the changed files appears below.)
@@ -0,0 +1,102 @@ (new file: learning-rate scheduler utilities)

```python
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------

import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler


def build_scheduler(optimizer, n_iter_per_epoch, epoches, warm_up_epoches):
    """Build a per-iteration LR scheduler; only the hard-coded 'cosine' branch is used."""
    num_steps = int(epoches * n_iter_per_epoch)
    warmup_steps = int(warm_up_epoches * n_iter_per_epoch)
    lr_scheduler = None
    NAME = 'cosine'
    if NAME == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_steps,
            t_mul=1.,
            lr_min=5.0e-06,
            warmup_lr_init=5.0e-07,
            warmup_t=warmup_steps,
            cycle_limit=1,
            t_in_epochs=False,
        )
    elif NAME == 'linear':
        # NOTE: this branch and the 'step' branch below reference a `config`
        # object that is not defined in this module; they are unreachable
        # while NAME stays 'cosine'.
        lr_scheduler = LinearLRScheduler(
            optimizer,
            t_initial=num_steps,
            lr_min_rate=0.01,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    elif NAME == 'step':
        decay_steps = int(2 * n_iter_per_epoch)
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=decay_steps,
            decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )

    return lr_scheduler
```
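Since `t_in_epochs=False`, the scheduler returned by `build_scheduler` is meant to be advanced once per training iteration with the global step index rather than once per epoch. A minimal usage sketch, assuming a timm release compatible with the arguments above (the toy model, learning rate, and epoch counts are illustrative only):

```python
import torch

model = torch.nn.Linear(10, 2)  # toy model for illustration
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

n_iter_per_epoch, epoches, warm_up_epoches = 100, 20, 2
scheduler = build_scheduler(optimizer, n_iter_per_epoch, epoches, warm_up_epoches)

for epoch in range(epoches):
    for it in range(n_iter_per_epoch):
        # ... forward / backward / optimizer.step() ...
        scheduler.step_update(epoch * n_iter_per_epoch + it)  # step by global iteration
```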
The same file also defines the `LinearLRScheduler` referenced by the 'linear' branch above:

```python
class LinearLRScheduler(Scheduler):
    """Linear warmup, then linear decay from the base LR down to base_lr * lr_min_rate."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lr_min_rate: float,
                 warmup_t=0,
                 warmup_lr_init=0.,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        self.t_initial = t_initial
        self.lr_min_rate = lr_min_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            t = t - self.warmup_t
            total_t = self.t_initial - self.warmup_t
            lrs = [v - ((v - v * self.lr_min_rate) * (t / total_t)) for v in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None
```
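After warmup, `_get_lr` interpolates each base learning rate `v` linearly from `v` down to `v * lr_min_rate` over the remaining `t_initial - warmup_t` steps. A quick sanity-check sketch of that behavior (the optimizer, base LR of 0.1, and step counts are arbitrary illustrative values):

```python
import torch

opt = torch.optim.SGD(torch.nn.Linear(4, 4).parameters(), lr=0.1)
sched = LinearLRScheduler(opt, t_initial=1000, lr_min_rate=0.01,
                          warmup_t=100, warmup_lr_init=1e-4, t_in_epochs=False)

print(sched.get_update_values(100))   # end of warmup -> [0.1], the full base LR
print(sched.get_update_values(550))   # halfway through the decay -> [~0.0505]
print(sched.get_update_values(1000))  # final step -> [~0.001] = 0.1 * lr_min_rate
```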
@@ -0,0 +1,57 @@ (new file: optimizer construction utilities)

```python
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------

from torch import optim as optim


def build_optimizer(model, lr, weight_decay=0.05):
    """
    Build optimizer, set weight decay of normalization to 0 by default.
    """
    skip = {}
    skip_keywords = {}
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()
    parameters = set_weight_decay(model, skip, skip_keywords)

    opt_lower = 'adamw'
    optimizer = None
    if opt_lower == 'sgd':
        # NOTE: this branch references a `config` object that is not defined in
        # this module; it is unreachable while opt_lower stays 'adamw'.
        optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
                              lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, eps=1.0e-08, betas=(0.9, 0.999),
                                lr=lr, weight_decay=weight_decay)

    return optimizer
```
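A minimal sketch of how `build_optimizer` groups parameters (the toy model and hyperparameters are illustrative; any module with 1-D or `.bias` parameters behaves the same way):

```python
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(128, 64),
    torch.nn.LayerNorm(64),
    torch.nn.Linear(64, 10),
)
optimizer = build_optimizer(model, lr=5e-4, weight_decay=0.05)

# Two parameter groups: biases and the LayerNorm parameters land in the
# second group, which overrides weight_decay to 0.
for group in optimizer.param_groups:
    print(len(group['params']), group['weight_decay'])  # prints "2 0.05" then "4 0.0"
```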
The same file also defines the parameter-grouping helpers used above:

```python
def set_weight_decay(model, skip_list=(), skip_keywords=()):
    has_decay = []
    no_decay = []

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or \
                check_keywords_in_name(name, skip_keywords):
            no_decay.append(param)
            # print(f"{name} has no weight decay")
        else:
            has_decay.append(param)
    return [{'params': has_decay},
            {'params': no_decay, 'weight_decay': 0.}]


def check_keywords_in_name(name, keywords=()):
    isin = False
    for keyword in keywords:
        if keyword in name:
            isin = True
    return isin
```
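The `skip_keywords` path is how a model can flag additional, higher-dimensional parameters for zero weight decay: `build_optimizer` calls `model.no_weight_decay_keywords()` when it exists and forwards the result to `set_weight_decay`. A hedged sketch with a made-up module (the class, its parameter names, and the `relative_position` keyword are illustrative, not part of this commit):

```python
import torch

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(32, 32)
        # 2-D parameter that would normally receive weight decay
        self.relative_position_bias = torch.nn.Parameter(torch.zeros(32, 32))

    def forward(self, x):
        return self.proj(x) + self.relative_position_bias

    def no_weight_decay_keywords(self):
        # picked up by build_optimizer via hasattr(...)
        return {'relative_position'}

optimizer = build_optimizer(TinyModel(), lr=1e-3)
print([len(g['params']) for g in optimizer.param_groups])   # [1, 2]
print([g['weight_decay'] for g in optimizer.param_groups])  # [0.05, 0.0]
```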