"""
# Created: 2023-12-26 12:41
# Copyright (C) 2023-now, RPL, KTH Royal Institute of Technology
# Author: Qingwen Zhang (https://kin-zhang.github.io/)
#
# This file is part of DeFlow (https://github.com/KTH-RPL/DeFlow).
# If you find this repo helpful, please cite the respective publication as
# listed on the above website.
# Description: produce flow from the model's predictions and write it back into the dataset,
#              then use `tools/visualization.py` to visualize the flow.
"""
import torch
from torch.utils.data import DataLoader
import lightning.pytorch as pl
from lightning.pytorch.loggers import WandbLogger
from omegaconf import DictConfig, OmegaConf
import hydra, wandb, os, sys
from hydra.core.hydra_config import HydraConfig
from src.dataset import HDF5Dataset
from src.trainer import ModelWrapper
from src.utils import bc

@hydra.main(version_base=None, config_path="conf", config_name="save")
def main(cfg):
    pl.seed_everything(cfg.seed, workers=True)
    output_dir = HydraConfig.get().runtime.output_dir
    if not os.path.exists(cfg.checkpoint):
        print(f"Checkpoint {cfg.checkpoint} does not exist. A checkpoint is needed for evaluation.")
        sys.exit(1)

    if cfg.res_name is None:
        cfg.res_name = cfg.checkpoint.split("/")[-1].split(".")[0]
        print(f"{bc.BOLD}NOTE{bc.ENDC}: res_name is not specified, using {bc.OKBLUE}{cfg.res_name}{bc.ENDC} as the default.")
    checkpoint_params = DictConfig(torch.load(cfg.checkpoint)["hyper_parameters"])
    cfg.output = checkpoint_params.cfg.output
    cfg.model.update(checkpoint_params.cfg.model)
    mymodel = ModelWrapper.load_from_checkpoint(cfg.checkpoint, cfg=cfg, eval=True)
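
    # offline=True keeps the W&B run local under `output_dir` (no login or sync needed);
    # the entity/project only matter if the run is synced later.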
    wandb_logger = WandbLogger(save_dir=output_dir,
                               entity="kth-rpl",
                               project="deflow-eval",
                               name=f"{cfg.output}",
                               offline=True)

    trainer = pl.Trainer(logger=wandb_logger, devices=1)

    # NOTE(Qingwen): search & check in pl_model.py: def test_step(self, batch, res_dict)
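    # batch_size=1 processes one sample per step and shuffle=False keeps the dataset order;
    # n_frames falls back to 2 when the training config has no `num_frames` entry.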
    trainer.test(model=mymodel,
                 dataloaders=DataLoader(
                     HDF5Dataset(cfg.dataset_path,
                                 n_frames=checkpoint_params.cfg.num_frames if 'num_frames' in checkpoint_params.cfg else 2),
                     batch_size=1, shuffle=False))

    wandb.finish()
if __name__ == "__main__":
main()