#!/bin/bash
# run_ICLR2022.sh
########################################################################################
# massive, alpha=.1, IR=10
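# (Interpretation, inferred from the flag values below rather than documented in this
#  script: alpha=.1 presumably corresponds to --homo_ratio 0.1, the homogeneous share
#  of each client's data, and IR=10 to --reduce_to_ratio .1, i.e. minority classes kept
#  at 10% of their original size, giving an imbalance ratio of 10.)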
## CLIMB
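# CLIMB casts imbalanced federated learning as a constrained optimization problem and
# solves it with a primal-dual method, hence run_PD_FL.py and --n_pd_rounds. As a best
# reading of the flags (not documented here): --lambda_lr is presumably the step size
# for the dual variables and --tolerance_epsilon the slack allowed in the constraints.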
python run_PD_FL.py --dataset mnist --homo_ratio 0.1 --n_workers_per_round 100 \
    --reduce_to_ratio .1 --use_ray --imbalance \
    --formulation imbalance-fl --learner fed-avg --local_lr 5e-2 \
    --n_pd_rounds 1000 --loss_fn cross-entropy-loss --model mlp \
    --n_workers 100 --n_p_steps 5 --dense_hid_dims 128-128 --no_data_augmentation \
    --lambda_lr 2 --tolerance_epsilon .01 --use_gradient_clip
## Baseline
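# Plain FedAvg with the standard cross-entropy loss and no imbalance correction,
# serving as the uncorrected reference point.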
python run_FL.py --dataset mnist --homo_ratio 0.1 --n_workers_per_round 100 \
    --reduce_to_ratio .1 --use_ray --imbalance \
    --learner fed-avg --local_lr 5e-2 \
    --n_global_rounds 5000 --loss_fn cross-entropy-loss --model mlp \
    --n_workers 100 --eval_freq 5 --dense_hid_dims 128-128 --no_data_augmentation \
    --use_gradient_clip
## Ratio-loss
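# The Ratio Loss baseline, run through the same primal-dual driver via
# --formulation ratioloss-fl. Our reading of the method name (not stated in this
# script): a cross-entropy loss reweighted according to estimated global class ratios.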
python run_PD_FL.py --dataset mnist --homo_ratio 0.1 --n_workers_per_round 100 \
    --reduce_to_ratio .1 --use_ray --imbalance \
    --formulation ratioloss-fl --learner fed-avg --local_lr 5e-2 \
    --n_pd_rounds 200 --loss_fn cross-entropy-loss --model mlp \
    --n_workers 500 --n_p_steps 5 --dense_hid_dims 128-128 --no_data_augmentation \
    --use_gradient_clip
## Focal-loss
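# The focal-loss baseline (--loss_fn focal-loss): standard FedAvg, but with the focal
# loss of Lin et al., which down-weights easy, well-classified examples so training
# concentrates on hard ones.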
python run_FL.py --dataset mnist --homo_ratio 0.1 --n_workers_per_round 100 \
    --reduce_to_ratio .1 --use_ray --imbalance \
    --learner fed-avg --local_lr 5e-2 \
    --n_global_rounds 1000 --loss_fn focal-loss --model mlp \
    --n_workers 500 --eval_freq 5 --dense_hid_dims 128-128 --no_data_augmentation \
    --use_gradient_clip
########################################################################################
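
# Usage sketch (not part of the original experiment set): one way to launch this script.
# Pinning a GPU via CUDA_VISIBLE_DEVICES and logging with tee are generic shell/PyTorch
# conventions, not options defined by this repository.
#
#   chmod +x run_ICLR2022.sh
#   CUDA_VISIBLE_DEVICES=0 ./run_ICLR2022.sh 2>&1 | tee run_ICLR2022.log
#
# To reproduce a single method, comment out the other python invocations above.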