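# Launch with torchrun, e.g. on a single node with 2 GPUs (the "tools/test.py"
# path is an assumption about where this script lives in the repo):
#   torchrun --nnodes=1 --nproc_per_node=2 tools/test.py CONFIG_FILE --checkpoint CKPT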
import os
import sys

sys.dont_write_bytecode = True
path = os.path.join(os.path.dirname(__file__), "..")
if path not in sys.path:
    sys.path.insert(0, path)

import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from mmengine.config import Config

from opentad.models import build_detector
from opentad.datasets import build_dataset, build_dataloader
from opentad.cores import eval_one_epoch
from opentad.utils import update_workdir, set_seed, create_folder, setup_logger


def parse_args():
    parser = argparse.ArgumentParser(description="Test a Temporal Action Detector")
    parser.add_argument("config", metavar="FILE", type=str, help="path to config file")
    parser.add_argument("--checkpoint", type=str, default="none", help="path to the checkpoint")
    parser.add_argument("--seed", type=int, default=42, help="random seed")
    parser.add_argument("--id", type=int, default=0, help="repeat experiment id")
    parser.add_argument("--not_eval", action="store_true", help="skip evaluation and only run inference")
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
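
    # torchrun (or torch.distributed.launch) exports RANK, WORLD_SIZE, and
    # LOCAL_RANK into the environment of every spawned process; they are read
    # below to join the process group.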
    # DDP init
    args.local_rank = int(os.environ["LOCAL_RANK"])
    args.world_size = int(os.environ["WORLD_SIZE"])
    args.rank = int(os.environ["RANK"])
    print(f"Distributed init (rank {args.rank}/{args.world_size}, local rank {args.local_rank})")
    dist.init_process_group("nccl", rank=args.rank, world_size=args.world_size)
    torch.cuda.set_device(args.local_rank)

    # set random seed, create work_dir
    set_seed(args.seed)
    cfg = update_workdir(cfg, args.id, torch.cuda.device_count())
    if args.rank == 0:
        create_folder(cfg.work_dir)

    # setup logger
    logger = setup_logger("Test", save_dir=cfg.work_dir, distributed_rank=args.rank)
    logger.info(f"Using torch version: {torch.__version__}, CUDA version: {torch.version.cuda}")
    logger.info(f"Config: \n{cfg.pretty_text}")

    # build dataset
    test_dataset = build_dataset(cfg.dataset.test, default_args=dict(logger=logger))
    test_loader = build_dataloader(
        test_dataset,
        rank=args.rank,
        world_size=args.world_size,
        shuffle=False,
        drop_last=False,
        **cfg.solver.test,
    )
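    # Note: build_dataloader receives rank/world_size, so it is expected to shard
    # the test set across processes (e.g. via a distributed sampler), each rank
    # running inference only on its own slice. (Assumption about OpenTAD's
    # dataloader internals.)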

    # build model
    model = build_detector(cfg.model)

    # DDP
    model = model.to(args.local_rank)
    model = DistributedDataParallel(model, device_ids=[args.local_rank])
    logger.info(f"Using DDP with {torch.cuda.device_count()} GPUs...")

    # load checkpoint: args -> config -> best
    if args.checkpoint != "none":
        checkpoint_path = args.checkpoint
    elif "test_epoch" in cfg.inference.keys():
        checkpoint_path = os.path.join(cfg.work_dir, f"checkpoint/epoch_{cfg.inference.test_epoch}.pth")
    else:
        checkpoint_path = os.path.join(cfg.work_dir, "checkpoint/best.pth")
    logger.info("Loading checkpoint from: {}".format(checkpoint_path))
    # map each rank's checkpoint to its own device so that all processes do not
    # deserialize tensors onto GPU 0
    device = f"cuda:{args.rank % torch.cuda.device_count()}"
    checkpoint = torch.load(checkpoint_path, map_location=device)
    logger.info("Checkpoint is epoch {}.".format(checkpoint["epoch"]))

    # Model EMA
    use_ema = getattr(cfg.solver, "ema", False)
    if use_ema:
        model.load_state_dict(checkpoint["state_dict_ema"])
        logger.info("Using Model EMA...")
    else:
        model.load_state_dict(checkpoint["state_dict"])

    # AMP: automatic mixed precision
    use_amp = getattr(cfg.solver, "amp", False)
    if use_amp:
        logger.info("Using Automatic Mixed Precision...")

    # test the detector
    logger.info("Testing Starts...\n")
    eval_one_epoch(
        test_loader,
        model,
        cfg,
        logger,
        args.rank,
        model_ema=None,  # the EMA weights, if any, were already loaded into the model above
        use_amp=use_amp,
        world_size=args.world_size,
        not_eval=args.not_eval,
    )


if __name__ == "__main__":
    main()