import os
import sys

sys.dont_write_bytecode = True
path = os.path.join(os.path.dirname(__file__), "..")
if path not in sys.path:
    sys.path.insert(0, path)

import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from mmengine.config import Config
from opentad.models import build_detector
from opentad.datasets import build_dataset, build_dataloader
from opentad.cores import eval_one_epoch
from opentad.utils import update_workdir, set_seed, create_folder, setup_logger


def parse_args():
    parser = argparse.ArgumentParser(description="Test a Temporal Action Detector")
    parser.add_argument("config", metavar="FILE", type=str, help="path to config file")
    parser.add_argument("--checkpoint", type=str, default="none", help="the checkpoint path")
    parser.add_argument("--seed", type=int, default=42, help="random seed")
    parser.add_argument("--id", type=int, default=0, help="repeat experiment id")
    parser.add_argument("--not_eval", action="store_true", help="whether not to eval, only do inference")
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
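
    # load config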
    cfg = Config.fromfile(args.config)
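
    # DDP init: read the rank info provided by the launcher from the environment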
    args.local_rank = int(os.environ["LOCAL_RANK"])
    args.world_size = int(os.environ["WORLD_SIZE"])
    args.rank = int(os.environ["RANK"])
    print(f"Distributed init (rank {args.rank}/{args.world_size}, local rank {args.local_rank})")
    dist.init_process_group("nccl", rank=args.rank, world_size=args.world_size)
    torch.cuda.set_device(args.local_rank)
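
    # set random seed, update the work_dir, and create it on rank 0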
    set_seed(args.seed)
    cfg = update_workdir(cfg, args.id, torch.cuda.device_count())
    if args.rank == 0:
        create_folder(cfg.work_dir)
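
    # setup logger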
    logger = setup_logger("Test", save_dir=cfg.work_dir, distributed_rank=args.rank)
    logger.info(f"Using torch version: {torch.__version__}, CUDA version: {torch.version.cuda}")
    logger.info(f"Config: \n{cfg.pretty_text}")
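
    # build test dataset and dataloader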
    test_dataset = build_dataset(cfg.dataset.test, default_args=dict(logger=logger))
    test_loader = build_dataloader(
        test_dataset,
        rank=args.rank,
        world_size=args.world_size,
        shuffle=False,
        drop_last=False,
        **cfg.solver.test,
    )
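
    # build the detector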
    model = build_detector(cfg.model)
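
    # move the model to the local GPU and wrap it with DistributedDataParallel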
    model = model.to(args.local_rank)
    model = DistributedDataParallel(model, device_ids=[args.local_rank])
    logger.info(f"Using DDP with {torch.cuda.device_count()} GPUs...")
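
    # resolve the checkpoint path and load the checkpoint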
    if args.checkpoint != "none":
        checkpoint_path = args.checkpoint
    elif "test_epoch" in cfg.inference.keys():
        checkpoint_path = os.path.join(cfg.work_dir, f"checkpoint/epoch_{cfg.inference.test_epoch}.pth")
    else:
        checkpoint_path = os.path.join(cfg.work_dir, "checkpoint/best.pth")
    logger.info("Loading checkpoint from: {}".format(checkpoint_path))
    device = f"cuda:{args.rank % torch.cuda.device_count()}"
    checkpoint = torch.load(checkpoint_path, map_location=device)
    logger.info("Checkpoint is epoch {}.".format(checkpoint["epoch"]))
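
    # load the EMA weights if Model EMA was enabled during training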
    use_ema = getattr(cfg.solver, "ema", False)
    if use_ema:
        model.load_state_dict(checkpoint["state_dict_ema"])
        logger.info("Using Model EMA...")
    else:
        model.load_state_dict(checkpoint["state_dict"])
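
    # AMP: run inference in mixed precision if enabled in the config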
    use_amp = getattr(cfg.solver, "amp", False)
    if use_amp:
        logger.info("Using Automatic Mixed Precision...")
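
    # test the detector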
    logger.info("Testing Starts...\n")
    eval_one_epoch(
        test_loader,
        model,
        cfg,
        logger,
        args.rank,
        model_ema=None,
        use_amp=use_amp,
        world_size=args.world_size,
        not_eval=args.not_eval,
    )


if __name__ == "__main__":
    main()