Skip to content

Commit

Permalink
offline test
Browse files Browse the repository at this point in the history
  • Loading branch information
Janetalready committed Sep 23, 2022
1 parent bd05290 commit 8297eb1
Show file tree
Hide file tree
Showing 4 changed files with 87 additions and 13 deletions.
56 changes: 55 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1 +1,55 @@
# Emergent Graphical Conventions in a Visual Communication Game (VisualCommunication)
## Environment setup
```
numpy
torch==1.11.0
opencv-python
argparse
torchvision
wandb
tqdm
```
## Dataset and pretrained models
The pixel-level sketch is generated by [Synthesizing human-like sketches from natural images](https://github.com/kampelmuehler/synthesizing_human_like_sketches)

Sender is pretrained with [ICCV2019-Learning to Paint](https://github.com/megvii-research/ICCV2019-LearningToPaint). The color output is reset to all ones.

The generated data and pretrained model are saved at
https://drive.google.com/drive/folders/1bhLefcXMllR8vf_Uz3aOb0OSGxZKM3RI?usp=sharing

Put the data under the `data` folder and the pretrained models under the `pretrained` folder.

## Training
Complete setting
```
python train_dreamer_add_class_gen.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_comp_rs0/' --exp='comp_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting complete --data_root 'data/output/'
```
max setting
```
python train_dreamer_add_class_gen.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_max_rs0/' --exp='max_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting max --data_root='data/output/'
```
sender-fixed setting
```
python train_dreamer_add_class_gen.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_sender_fix_rs0/' --exp='sender_fix_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting sender_fix --data_root='data/output/' --sender_fixed 1
```
one-step setting
```
python train_dreamer_add_class_gen.py --batch_size=64 --max_step=1 --game_size=4 --outf='./output_one_step_rs0/' --exp='one_step_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting one_step --data_root='data/output/'
```
retrieve
```
python train_original_reward_retrieve_5stroke_accum.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_retrieve_rs0/' --exp='retrieve_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting retrieve --data_root='data/output/' --sender_fixed 1
```
Accumulative-reward setting
```
python train_original_reward_retrieve_5stroke_accum.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_cum_rs0/' --exp='cum_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting cum --data_root='data/output/'
```
## Model test for generalization and classification

Test data pairs are generated by `data_preprocessing.py`.

Example to evaluate complete setting
```
python train_dreamer_add_class_gen.py --batch_size=64 --max_step=7 --game_size=4 --outf='./output_comp_rs0/' --exp='comp_rs0' --log_outf='train_log' --validate_episode=10 --discount=0.85 --sender_path='pretrained/actor.pkl' --num_stroke=5 --category_list='data/category.txt' --n_games=50000 --split_root='data/same_cate_mul_image300' --sender_decay=0.99 --manualSeed=0 --setting complete --data_root 'data/output/' --resume_path 'ckpt/comp_rs0.pt' --offline_test 1
```
14 changes: 10 additions & 4 deletions train_dreamer_add_class_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,6 @@
opt = parse_arguments()
if opt.setting == 'max':
from architectures_dreamer_max import Sender, ReceiverOnestep, ValueModel, Players
elif opt.setting == 'retrieve':
from architectures_retrieve_5stroke import Sender, ReceiverOnestep, ValueModel, Players
else:
from architectures_dreamer_v2 import Sender, ReceiverOnestep, ValueModel, Players
import pdb
Expand Down Expand Up @@ -301,8 +299,8 @@ def test_generalization(opt, players, steps, pair_list, set_name):

assert len(game_steps) == len(pair_list)
assert len(gt_test) == len(pair_list)
# print(sum(game_steps) / float(len(pair_list)))
# print(sum(gt_test) / float(len(pair_list)))
print('avg_step', sum(game_steps) / float(len(pair_list)))
print('test_acc', sum(gt_test) / float(len(pair_list)))
wandb.log({f'generalization/test_{set_name}': sum(gt_test) / float(len(pair_list)),
f'generalization/avg_step_{set_name}': sum(game_steps) / float(len(pair_list)),
}, step=steps)
Expand Down Expand Up @@ -347,6 +345,14 @@ def train(opt):
if opt.cuda:
players.cuda()

if opt.offline_test:
save_step_evolve(opt, players, 0, pair_list)
test_generalization(opt, players, 0, pair_list, 'train')
test_generalization(opt, players, 0, pair_list_test, 'test')
test_generalization(opt, players, 0, pair_list_unseen_cate, 'unseen_cate')
classification_train.train(opt, cate_list, 0)
exit(0)

optimizer_r = optim.Adam(filter(lambda p: p.requires_grad, players.receiver.parameters()),
lr=opt.receiver_lr, betas=(opt.beta1, opt.beta2))
optimizer_s = optim.Adam(filter(lambda p: p.requires_grad, players.sender.parameters()),
Expand Down
15 changes: 14 additions & 1 deletion train_original_reward_retrieve_5stroke_accum.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
import classification.train as classification_train
from tqdm import tqdm
import wandb

os.environ['WANDB_SILENT']="true"
wandb.login()
USE_CUDA = torch.cuda.is_available()

Expand Down Expand Up @@ -327,12 +327,25 @@ def train(opt):
receiver.to(device)
players = Players(sender, receiver)
players.sender.apply(set_bn_eval)

if opt.sender_fixed:
players.sender.eval()

if opt.cuda:
players.cuda()

if opt.resume_path is not None:
print('loaded:{}'.format(opt.resume_path))
players.load_state_dict(torch.load(opt.resume_path))

if opt.offline_test:
save_step_evolve(opt, players, 0, pair_list)
test_generalization(opt, players, 0, pair_list, 'train')
test_generalization(opt, players, 0, pair_list_test, 'test')
test_generalization(opt, players, 0, pair_list_unseen_cate, 'unseen_cate')
classification_train.train(opt, cate_list, 0)
exit(0)

if opt.opti == 'adam':
optimizer_r = optim.Adam(filter(lambda p: p.requires_grad, players.receiver.parameters()),
lr=opt.receiver_lr, betas=(opt.beta1, opt.beta2))
Expand Down
15 changes: 8 additions & 7 deletions utils_dreamer.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,22 +22,23 @@ def compute_similarity_images(space):
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--split_root', default='./data/same_cate_mul_image300', help='split root folder')
'--split_root', default='../visual_communication_II/data/same_cate_mul_image300', help='split root folder')
parser.add_argument(
'--category_list', default='./data/category.txt', help='split root folder')
'--category_list', default='../visual_communication_II/data/category.txt', help='split root folder')
parser.add_argument(
'--data_root', default='./data/output/', help='data root folder')
'--data_root', default='../synthesizing_human_like_sketches/output/', help='data root folder')
parser.add_argument(
'--sender_path', default='./pretrained/actor_one_stroke.pkl', help='pretrained folder')
'--sender_path', default='../visual_communication_II/signal_game/pretrained/actor.pkl', help='pretrained folder')
parser.add_argument(
'--resume_path', default=None, help='pretrained folder')
parser.add_argument(
'--setting', default='complete', help='game settings')
parser.add_argument('--offline_test', type=int, default=0, help='offline test')
parser.add_argument('--cuda', type=int, default=1, help='enables cuda')
parser.add_argument('--max_step', type=int,
help='number of drawing steps', default=10)
help='number of drawing steps', default=7)
parser.add_argument('--num_stroke', type=int,
help='number of strokes', default=3)
help='number of strokes', default=5)
parser.add_argument('--sender_decay', type=float,
help='sender_decay_rate', default=0.95)
parser.add_argument('--receiver_decay', type=float,
Expand Down Expand Up @@ -84,7 +85,7 @@ def parse_arguments():
help='folder to experiment')
parser.add_argument('--log_outf', default='train_log_dreamer',
help='folder to training log')
parser.add_argument('--manualSeed', type=int,default=0,
parser.add_argument('--manualSeed', type=int,default=10,
help='manual seed')
parser.add_argument('--game_size', type=int, default=4,
help='game size')
Expand Down

0 comments on commit 8297eb1

Please sign in to comment.