Commit 81685e4 · ifire committed
Browse files:
- .gitignore +6 -0
- README.md +9 -0
- avatar_rigging.yaml +27 -0
- cog.yaml +17 -0
- datasets/.keep +0 -0
- train.py +51 -0
.gitignore
ADDED
@@ -0,0 +1,6 @@
+.tmp
+results
+pre_trained
+datasets/imagenette2
+.cog
+__pycache__
README.md
ADDED
@@ -0,0 +1,9 @@
+> Assume that we have a dataset in which the training set contains only normal images, and the test set contains both normal and abnormal images. We want to train an anomaly segmentation model that will be able to detect the abnormal regions in the test set. We only have normal images in our dataset but would like to train a segmentation model. Use the synthetic anomaly generation feature to create abnormal images from normal images, and perform the validation and test steps.
+
+```
+pip3 install anomalib
+anomalib install --option core
+cog train --input
+```
+
+In VR, capture images of normal avatar motion and use them to detect abnormal avatar animation.
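The workflow above leans on anomalib's synthetic test split: when only normal images exist, the Folder datamodule holds out a fraction of them and perturbs the held-out images into artificial anomalies, so the validation and test steps can still run. A minimal sketch of that mechanism (the name and directory values are illustrative, not taken from this repo):

```
from anomalib.data import Folder
from anomalib.data.base.dataset import TaskType
from anomalib.data.utils import TestSplitMode, ValSplitMode

# Only a normal_dir is supplied; TestSplitMode.SYNTHETIC makes anomalib corrupt a
# held-out subset of the normal images to serve as abnormal test samples.
datamodule = Folder(
    name="demo",          # illustrative name
    root="dataset",       # illustrative root
    normal_dir="normal",
    task=TaskType.SEGMENTATION,
    test_split_mode=TestSplitMode.SYNTHETIC,
    test_split_ratio=0.2,
    val_split_mode=ValSplitMode.SAME_AS_TEST,
    val_split_ratio=0.5,
)
datamodule.setup()
```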
avatar_rigging.yaml
ADDED
@@ -0,0 +1,27 @@
+model:
+  class_path: anomalib.models.EfficientAd
+  init_args:
+    imagenet_dir: avatar_rigging_dataset
+    teacher_out_channels: 384
+    model_size: S
+    lr: 0.0001
+    weight_decay: 1.0e-05
+    padding: false
+    pad_maps: true
+data:
+  class_path: anomalib.data.Folder
+  init_args:
+    name: avatar_rigging
+    root: avatar_rigging_dataset
+    normal_dir: nominal
+    abnormal_dir: anomaly
+    train_batch_size: 1
+    eval_batch_size: 32
+    num_workers: 8
+    task: segmentation
+    train_transform: null
+    eval_transform: null
+    test_split_mode: synthetic
+    test_split_ratio: 0.2
+    val_split_mode: same_as_test
+    val_split_ratio: 0.5
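The class_path / init_args layout above follows the Lightning CLI convention, presumably meant for something like `anomalib train --config avatar_rigging.yaml`. Instantiated by hand, the same settings reduce to this sketch (values copied from the YAML; the enum imports mirror train.py below):

```
from anomalib.data import Folder
from anomalib.data.base.dataset import TaskType
from anomalib.data.utils import TestSplitMode, ValSplitMode
from anomalib.models import EfficientAd
from anomalib.models.image.efficient_ad.lightning_model import EfficientAdModelSize

# Direct-instantiation equivalent of the model section.
model = EfficientAd(
    imagenet_dir="avatar_rigging_dataset", teacher_out_channels=384,
    model_size=EfficientAdModelSize.S, lr=0.0001, weight_decay=1.0e-05,
    padding=False, pad_maps=True,
)
# Direct-instantiation equivalent of the data section.
datamodule = Folder(
    name="avatar_rigging", root="avatar_rigging_dataset",
    normal_dir="nominal", abnormal_dir="anomaly",
    train_batch_size=1, eval_batch_size=32, num_workers=8,
    task=TaskType.SEGMENTATION,
    test_split_mode=TestSplitMode.SYNTHETIC, test_split_ratio=0.2,
    val_split_mode=ValSplitMode.SAME_AS_TEST, val_split_ratio=0.5,
)
```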
cog.yaml
ADDED
@@ -0,0 +1,17 @@
+build:
+  gpu: true
+  system_packages:
+    - "libgl1-mesa-glx"
+    - "libglib2.0-0"
+    - "python3-setuptools"
+  python_version: "3.10"
+  cuda: "11.5" # Query nvcc --version
+  python_packages:
+    - "torch==2.3.1"
+    - torchvision
+    - torchaudio
+    - "anomalib[full]==1.1.0"
+    - "openvino==2024.2.0"
+    - "lightning==2.3.0"
+    - "scikit-learn==1.5.0"
+train: "train.py:train"
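A quick, throwaway check that the image cog builds actually matches these pins (illustrative, not part of the commit):

```
# Run inside the built container to sanity-check the cog.yaml environment.
import torch
import anomalib

print(torch.__version__)          # expect 2.3.1
print(torch.cuda.is_available())  # expect True when gpu: true and the CUDA stack matches
print(anomalib.__version__)       # expect 1.1.0
```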
datasets/.keep
ADDED
File without changes
train.py
ADDED
@@ -0,0 +1,51 @@
+from anomalib.data.image.folder import Folder
+from anomalib.models import EfficientAd
+from anomalib.models.image.efficient_ad.lightning_model import EfficientAdModelSize
+from anomalib.data.base.dataset import TaskType
+from anomalib.data.utils import TestSplitMode, ValSplitMode
+from anomalib.engine import Engine
+from cog import Path, BaseModel, Input
+import shutil
+
+class TrainingOutput(BaseModel):
+    weights: Path
+    dataset_root: Path
+    pretrained: Path
+
+def train(
+    normal_dir: list[Path] = Input(description="Directories of normal training images"),
+) -> TrainingOutput:
+    # Stage every provided normal image under dataset/normal for the Folder datamodule.
+    _normal_dir = Path("normal")
+    _dataset_dir = Path("dataset")
+    _dataset_normal_dir = _dataset_dir / _normal_dir
+    _dataset_normal_dir.mkdir(parents=True, exist_ok=True)
+    for dir_path in normal_dir:
+        for file_path in dir_path.iterdir():
+            if file_path.is_file():
+                shutil.copy(file_path, str(_dataset_normal_dir))
+    # No abnormal images are available, so the test split is generated synthetically
+    # and the validation set is drawn from that synthetic test set.
+    datamodule = Folder(
+        name="hazelnut_toy", root=str(_dataset_dir), normal_dir=str(_normal_dir),
+        abnormal_dir=None, normal_test_dir=None, mask_dir=None,
+        normal_split_ratio=0.2, extensions=None,
+        train_batch_size=1, eval_batch_size=32, num_workers=8,
+        task=TaskType.SEGMENTATION,
+        image_size=None, transform=None, train_transform=None, eval_transform=None,
+        test_split_mode=TestSplitMode.SYNTHETIC, test_split_ratio=0.2,
+        val_split_mode=ValSplitMode.FROM_TEST, val_split_ratio=0.5,
+        seed=None,
+    )
+    datamodule.setup()
+    model = EfficientAd(
+        imagenet_dir=_dataset_dir, teacher_out_channels=384,
+        model_size=EfficientAdModelSize.S, lr=0.0001, weight_decay=1e-05,
+        padding=False, pad_maps=True,
+    )
+    engine = Engine()
+    engine.train(datamodule=datamodule, model=model)
+    weights_file = "results/EfficientAd/dataset/latest/weights/lightning/model.ckpt"
+    return TrainingOutput(weights=Path(weights_file), dataset_root=_dataset_dir, pretrained=Path("pre_trained"))
+
+# anomalib predict --return_predictions false --ckpt_path results/EfficientAd/avatar_rigging/latest/weights/lightning/model.ckpt --config results/EfficientAd/avatar_rigging/latest/config.yaml
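Outside of cog (which normally calls `train()` itself), the entry point can also be exercised directly for local debugging; a hypothetical invocation, with an illustrative input path:

```
# Hypothetical local run; "captures/normal" is illustrative, not part of this repo.
from cog import Path
from train import train

out = train(normal_dir=[Path("captures/normal")])
print(out.weights)  # checkpoint produced by the Engine run
```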