-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmae_eval_finetune.sh
59 lines (50 loc) · 1.47 KB
/
mae_eval_finetune.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
#!/bin/bash
# Fine-tune evaluation of an MAE model on CIFAR-10.
# Loads a checkpoint and runs full fine-tuning through main_finetune.py.
#
# Required layout: main_finetune.py in the CWD, CIFAR-10 under $DATA_PATH,
# and the starting checkpoint at $CHECK_POINT.
set -euo pipefail   # -u catches typo'd vars; pipefail fails broken pipelines

# Choose GPU (single-GPU run).
export CUDA_VISIBLE_DEVICES=1

# Network and dataset configuration.
MODEL="mae_Deit_tiny_patch4"   # model name understood by main_finetune.py
DATA_PATH="./CIFAR10"          # path of the CIFAR-10 dataset
IMG_SIZE=32                    # CIFAR-10 images are 32x32
NB_CLASSES=10                  # CIFAR-10 has 10 classes

# Where to save models and the training log.
OUTPUT_DIR="./MAE-1-normPixelLoss/eval_finetune/output_dir"
LOG_DIR="./MAE-1-normPixelLoss/eval_finetune/log_dir"
# SAVE_FREQ=20

# Hyperparameters.
BATCH_SIZE=256
EPOCHS=100   # follow the requirement
LR=1e-4

# Checkpoint used to initialize fine-tuning.
# NOTE(review): this points into the *eval_linear* output dir — confirm that
# the linear-eval checkpoint (not the pretraining one) is the intended start.
CHECK_POINT="./MAE-1-normPixelLoss/eval_linear/output_dir/checkpoint-99.pth"

# Create output locations up front so the run cannot die late on a missing dir.
mkdir -p -- "$OUTPUT_DIR" "$LOG_DIR"

# All expansions quoted (ShellCheck SC2086) so paths with spaces stay intact.
python main_finetune.py \
    --model "$MODEL" \
    --data_path "$DATA_PATH" \
    --output_dir "$OUTPUT_DIR" \
    --log_dir "$LOG_DIR" \
    --batch_size "$BATCH_SIZE" \
    --epochs "$EPOCHS" \
    --lr "$LR" \
    --device cuda \
    --nb_classes "$NB_CLASSES" \
    --finetune "$CHECK_POINT" \
    --input_size "$IMG_SIZE"
# ---------------------------------------------------------------------
# # Run training with torch.distributed.launch
# python -m torch.distributed.launch \
#     --nproc_per_node=2 \
#     --master_port=12355 \
#     main_pretrain.py \
#     --model "$MODEL" \
#     --data_path "$DATA_PATH" \
#     --output_dir "$OUTPUT_DIR" \
#     --log_dir "$LOG_DIR" \
#     --batch_size "$BATCH_SIZE" \
#     --epochs "$EPOCHS" \
#     --lr "$LR" \
#     --weight_decay "$WEIGHT_DECAY" \
#     --save_freq "$SAVE_FREQ" \
#     --device cuda \
#     --input_size 32 \
#     --world_size 2 \
#     --dist_url 'tcp://127.0.0.1:12355'