ds_training.py
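"""
Train an agent on the dSprites dataset by minimising the variational free
energy. The run is configured through Hydra and the agent is periodically
checkpointed during training.
"""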
from __future__ import print_function
from singletons.Logger import Logger
import hydra
from omegaconf import OmegaConf
from hydra.utils import instantiate
import numpy as np
import random
import torch
from singletons.Device import Device
from agents.save.Checkpoint import Checkpoint
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")


class DisentangledSpritesDataset(Dataset):
    """dSprites dataset."""

    def __init__(self, file, transform=None):
        """
        Args:
            file (string): File containing the dSprites dataset.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.filepath = file
        dataset_zip = np.load(self.filepath, allow_pickle=True, encoding='bytes')
        # print('Keys in the dataset:', dataset_zip.keys())
        self.imgs = dataset_zip['imgs']
        self.latents_values = dataset_zip['latents_values']
        self.latents_classes = dataset_zip['latents_classes']
        self.metadata = dataset_zip['metadata'][()]
        # print('Metadata: \n', self.metadata)
        self.transform = transform

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        # Return the image (with the optional transform applied) and an empty label.
        sample = self.imgs[idx].astype(np.float32)
        if self.transform:
            sample = self.transform(sample)
        return sample, []
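
# Usage sketch for the dataset class, assuming a local copy of the dSprites
# .npz archive at a placeholder path:
#
#   dataset = DisentangledSpritesDataset("data/dsprites.npz", transform=transforms.ToTensor())
#   image, _ = dataset[0]    # tensor of shape (1, 64, 64)
#   print(len(dataset))      # number of images in the archive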


def load_dsprites(config, val_split=0.9):
    """
    Load the dSprites dataset and split it into a training and a validation set.
    Args:
        config: the hydra configuration.
        val_split (float): the fraction of the dataset reserved for validation.
    Returns:
        the training and validation data loaders.
    """
    # img_size = 64
    path = config["env"]["images_archive"]
    dataset = DisentangledSpritesDataset(path, transform=transforms.ToTensor())

    # Create the data indices for the training and validation splits.
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(val_split * dataset_size))
    np.random.seed(config["seed"])
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Create the data samplers and loaders.
    train_sampler = SubsetRandomSampler(train_indices)
    val_sampler = SubsetRandomSampler(val_indices)
    train_loader = DataLoader(dataset, batch_size=config["batch_size"], sampler=train_sampler)
    val_loader = DataLoader(dataset, batch_size=config["batch_size"], sampler=val_sampler)
    return train_loader, val_loader
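
# The script is driven by a Hydra configuration named "training" located in the
# "config" directory. A minimal sketch of the keys accessed below (the file
# layout, values, and agent target are illustrative assumptions, not the actual
# project configuration):
#
#   # config/training.yaml
#   seed: 0
#   batch_size: 64
#   env:
#     images_archive: path/to/dsprites.npz
#   checkpoint:
#     file: path/to/checkpoint.pt
#     frequency: 100
#   agent:
#     _target_: agents.SomeAgent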


@hydra.main(config_path="config", config_name="training")
def train(config):
    # Set the seed requested by the user.
    np.random.seed(config["seed"])
    random.seed(config["seed"])
    torch.manual_seed(config["seed"])

    # Create the logger and keep track of the configuration.
    Logger.get(name="Training").info("Configuration:\n{}".format(OmegaConf.to_yaml(config)))

    # Load the dSprites dataset.
    train_loader, _ = load_dsprites(config, val_split=0.1)

    # Create the agent, reloading it from the checkpoint if one exists.
    archive = Checkpoint(config["checkpoint"]["file"])
    agent = archive.load_model() if archive.exists() else instantiate(config["agent"])

    # Train the agent.
    for epoch in range(1, 100):
        for _, (data, _) in enumerate(train_loader):
            # Send the data to the device.
            data = data.to(Device.get())

            # Compute the variational free energy.
            vfe_loss = agent.compute_vfe(config, data)

            # Perform one step of gradient descent.
            agent.optimizer.zero_grad()
            vfe_loss.backward()
            agent.optimizer.step()

            # Save the agent (if needed).
            if agent.steps_done % config["checkpoint"]["frequency"] == 0:
                agent.save(config["checkpoint"]["file"])

            # Increase the number of steps done.
            agent.steps_done += 1

    Logger.get().info("End.")


if __name__ == '__main__':
    # Make hydra able to load tuples.
    OmegaConf.register_new_resolver("tuple", lambda *args: tuple(args))

    # Train the DGN.
    train()
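
# Command-line usage sketch: the script is launched directly and accepts
# standard Hydra overrides; the override values below are examples only.
#
#   python ds_training.py seed=42 batch_size=32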