diff --git a/README.md b/README.md deleted file mode 100644 index b2e000627c824ef032fad7701db93635b55afdc1..0000000000000000000000000000000000000000 --- a/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AnomalyGPT -emoji: 📉 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..239cc305b582eed0086ac94bd5ce62ceeafe7b52 --- /dev/null +++ b/app.py @@ -0,0 +1,248 @@ +import gradio as gr +import mdtex2html +from model.openllama import OpenLLAMAPEFTModel +import torch +from io import BytesIO +from PIL import Image as PILImage +import cv2 +import numpy as np +from matplotlib import pyplot as plt +from torchvision import transforms + +# init the model +args = { + 'model': 'openllama_peft', + 'imagebind_ckpt_path': './pretrained_ckpt/imagebind_ckpt/imagebind_huge.pth', + 'vicuna_ckpt_path': './pretrained_ckpt/vicuna_ckpt/7b_v0', + 'anomalygpt_ckpt_path': './ckpt/train_cn/pytorch_model.pt', + 'delta_ckpt_path': './pretrained_ckpt/pandagpt_ckpt/7b/pytorch_model.pt', + 'stage': 2, + 'max_tgt_len': 128, + 'lora_r': 32, + 'lora_alpha': 32, + 'lora_dropout': 0.1, + 'layers': [7,15,23,31] +} + +model = OpenLLAMAPEFTModel(**args) +delta_ckpt = torch.load(args['delta_ckpt_path'], map_location=torch.device('cpu')) +model.load_state_dict(delta_ckpt, strict=False) +delta_ckpt = torch.load(args['anomalygpt_ckpt_path'], map_location=torch.device('cpu')) +model.load_state_dict(delta_ckpt, strict=False) +model = model.eval() + + +output = None + +"""Override Chatbot.postprocess""" +def postprocess(self, y): + if y is None: + return [] + for i, (message, response) in enumerate(y): + y[i] = ( + None if message is None else mdtex2html.convert((message)), + None if response is None else mdtex2html.convert(response), + ) + return y + + +gr.Chatbot.postprocess = postprocess + + +def parse_text(text): + """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" + lines = text.split("\n") + lines = [line for line in lines if line != ""] + count = 0 + for i, line in enumerate(lines): + if "```" in line: + count += 1 + items = line.split('`') + if count % 2 == 1: + lines[i] = f'
<pre><code class="language-{items[-1]}">'
+            else:
+                lines[i] = f'<br></code></pre>'
+        else:
+            if i > 0:
+                if count % 2 == 1:
+                    line = line.replace("`", "\`")
+                    line = line.replace("<", "&lt;")
+                    line = line.replace(">", "&gt;")
+                    line = line.replace(" ", "&nbsp;")
+                    line = line.replace("*", "&ast;")
+                    line = line.replace("_", "&lowbar;")
+                    line = line.replace("-", "&#45;")
+                    line = line.replace(".", "&#46;")
+                    line = line.replace("!", "&#33;")
+                    line = line.replace("(", "&#40;")
+                    line = line.replace(")", "&#41;")
+                    line = line.replace("$", "&#36;")
+                lines[i] = "<br>
"+line + text = "".join(lines) + return text + + +def predict( + input, + image_path, + normal_img_path, + chatbot, + max_length, + top_p, + temperature, + history, + modality_cache, +): + + if image_path is None and normal_img_path is None: + return [(input, "There is no input data provided! Please upload your data and start the conversation.")] + else: + print(f'[!] image path: {image_path}\n[!] normal image path: {normal_img_path}\n') + + # prepare the prompt + prompt_text = '' + for idx, (q, a) in enumerate(history): + if idx == 0: + prompt_text += f'{q}\n### Assistant: {a}\n###' + else: + prompt_text += f' Human: {q}\n### Assistant: {a}\n###' + if len(history) == 0: + prompt_text += f'{input}' + else: + prompt_text += f' Human: {input}' + + response, pixel_output = model.generate({ + 'prompt': prompt_text, + 'image_paths': [image_path] if image_path else [], + 'normal_img_paths': [normal_img_path] if normal_img_path else [], + 'audio_paths': [], + 'video_paths': [], + 'thermal_paths': [], + 'top_p': top_p, + 'temperature': temperature, + 'max_tgt_len': max_length, + 'modality_embeds': modality_cache + },web_demo=True) + chatbot.append((parse_text(input), parse_text(response))) + history.append((input, response)) + + + plt.imshow(pixel_output.reshape(224,224).detach().cpu(), cmap='binary_r') + plt.axis('off') + plt.savefig('output.png',bbox_inches='tight',pad_inches = 0) + + target_size = 224 + original_width, original_height = PILImage.open(image_path).size + if original_width > original_height: + new_width = target_size + new_height = int(target_size * (original_height / original_width)) + else: + new_height = target_size + new_width = int(target_size * (original_width / original_height)) + + new_image = PILImage.new('L', (target_size, target_size), 255) # 'L' mode for grayscale + + paste_x = (target_size - new_width) // 2 + paste_y = (target_size - new_height) // 2 + + pixel_output = PILImage.open('output.png').resize((new_width, new_height), PILImage.LANCZOS) + + new_image.paste(pixel_output, (paste_x, paste_y)) + + new_image.save('output.png') + + image = cv2.imread('output.png', cv2.IMREAD_GRAYSCALE) + kernel = np.ones((3, 3), np.uint8) + eroded_image = cv2.erode(image, kernel, iterations=1) + cv2.imwrite('output.png', eroded_image) + + global output + output = PILImage.open('output.png').convert('L') + + + return chatbot, history, modality_cache + + +def get_image(): + global output + return output if output else "ffffff.png" + + +def reset_user_input(): + return gr.update(value='') + +def reset_dialog(): + return [], [] + +def reset_state(): + global output + output = None + return None, None, [], [], [] + + + +with gr.Blocks() as demo: + gr.HTML("""

<h1 align="center">Demo of AnomalyGPT</h1>

""") + + with gr.Row(): + with gr.Column(scale=1): + with gr.Row(scale=3): + image_path = gr.Image(type="filepath", label="Query Image", value=None) + with gr.Row(scale=3): + normal_img_path = gr.Image(type="filepath", label="Normal Image", value=None) + with gr.Row(): + max_length = gr.Slider(0, 512, value=512, step=1.0, label="Maximum length", interactive=True) + with gr.Row(): + top_p = gr.Slider(0, 1, value=0.01, step=0.01, label="Top P", interactive=True) + with gr.Row(): + temperature = gr.Slider(0, 1, value=1.0, step=0.01, label="Temperature", interactive=True) + + + with gr.Column(scale=3): + with gr.Row(): + with gr.Column(scale=6): + chatbot = gr.Chatbot().style(height=415) + with gr.Column(scale=4): + # gr.Image(output) + image_output = gr.Image(value=get_image, label="Localization Output", every=1.0, shape=[224,224]) + with gr.Row(): + user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(container=False) + with gr.Row(): + with gr.Column(scale=2): + submitBtn = gr.Button("Submit", variant="primary") + with gr.Column(scale=1): + emptyBtn = gr.Button("Clear History") + + history = gr.State([]) + modality_cache = gr.State([]) + + submitBtn.click( + predict, [ + user_input, + image_path, + normal_img_path, + chatbot, + max_length, + top_p, + temperature, + history, + modality_cache, + ], [ + chatbot, + history, + modality_cache + ], + show_progress=True + ) + + submitBtn.click(reset_user_input, [], [user_input]) + emptyBtn.click(reset_state, outputs=[ + image_path, + normal_img_path, + chatbot, + history, + modality_cache + ], show_progress=True) + + +demo.queue().launch(server_port=24008) diff --git a/ckpt/train_supervised/pytorch_model.pt b/ckpt/train_supervised/pytorch_model.pt new file mode 100644 index 0000000000000000000000000000000000000000..8490f3115b62afe22671810a53c09720a1da7c66 --- /dev/null +++ b/ckpt/train_supervised/pytorch_model.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe107480518bd77d5f02fb95ce759e010d4236bee23da74c9a1f056dc1f316de +size 225330109 diff --git a/ffffff.png b/ffffff.png new file mode 100644 index 0000000000000000000000000000000000000000..c4e40e842f54456b54db92c02d103129cfebdd5c Binary files /dev/null and b/ffffff.png differ diff --git a/header.py b/header.py new file mode 100644 index 0000000000000000000000000000000000000000..97338165d32d531838566ade9c9217182bb8ea67 --- /dev/null +++ b/header.py @@ -0,0 +1,35 @@ +import torch +import datetime +import types +import deepspeed +from transformers.deepspeed import HfDeepSpeedConfig +import transformers +import numpy as np +from collections import OrderedDict +from torch.utils.data import Dataset, DataLoader +from torch.nn.utils import clip_grad_norm_ +from torch.cuda.amp import autocast, GradScaler +from torch.nn import DataParallel +from torch.optim import lr_scheduler +import torch.optim as optim +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm +import os +import re +import math +import random +import json +import time +import logging +from copy import deepcopy +import ipdb +import argparse +import data +from transformers import LlamaTokenizer, LlamaForCausalLM, LlamaConfig +from torch.nn.utils.rnn import pad_sequence +from peft import LoraConfig, TaskType, get_peft_model + +logging.getLogger("transformers").setLevel(logging.WARNING) +logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR) +os.environ['TOKENIZERS_PARALLELISM'] = 'false' diff --git a/model/AnomalyGPT_models.py 
b/model/AnomalyGPT_models.py new file mode 100644 index 0000000000000000000000000000000000000000..5c4f4feb045091f6cc1310795bb10915ca60c531 --- /dev/null +++ b/model/AnomalyGPT_models.py @@ -0,0 +1,73 @@ +import torch +from torch import nn +import numpy as np +# from datas.dataset_3d import * +from torch.nn import functional as F + + +class Normalize(nn.Module): + def __init__(self, dim: int) -> None: + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.nn.functional.normalize(x, dim=self.dim, p=2) + + +class LinearLayer(nn.Module): + def __init__(self, dim_in, dim_out, k): + super(LinearLayer, self).__init__() + self.fc = nn.ModuleList([nn.Linear(dim_in, dim_out) for i in range(k)]) + + def forward(self, tokens): + for i in range(len(tokens)): + if len(tokens[i].shape) == 3: + tokens[i] = tokens[i].transpose(0,1) + tokens[i] = self.fc[i](tokens[i][:, 1:, :]) + else: + B, C, H, W = tokens[i].shape + tokens[i] = self.fc[i](tokens[i].view(B, C, -1).permute(0, 2, 1).contiguous()) + return tokens + +class PromptLearner(nn.Module): + def __init__(self, dim_in, dim_out) -> None: + super().__init__() + self.meta_net = nn.Sequential( + nn.Conv2d(dim_in, dim_in * 4, kernel_size=3, padding=1), + # nn.BatchNorm2d(dim_in * 4), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), # 112 * 112 + + nn.Conv2d(dim_in * 4, dim_in * 16, kernel_size=3, padding=1), + # nn.BatchNorm2d(dim_in * 16), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), # 56 * 56 + + nn.Conv2d(dim_in * 16, dim_in * 64, kernel_size=3, padding=1), + # nn.BatchNorm2d(dim_in * 64), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), # 28 * 28 + + nn.Conv2d(dim_in * 64, dim_in * 256, kernel_size=3, padding=1), + # nn.BatchNorm2d(dim_in * 256), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), # 14 * 14 + + nn.Conv2d(dim_in * 256, dim_in * 1024, kernel_size=3, padding=1), + # nn.BatchNorm2d(dim_in * 1024), + nn.ReLU(inplace=True), + nn.MaxPool2d(2), # 7 * 7 + + nn.Conv2d(dim_in * 1024, dim_out, kernel_size=5, padding=0), + # nn.BatchNorm2d(dim_out), + # nn.ReLU(inplace=True), + ) + self.base_prompts = nn.Parameter(torch.randn((9, dim_out)),requires_grad=True) + + def forward(self, input): + B,C,H,W = input.shape + img_prompts = self.meta_net(input) + # print(input.shape, img_prompts.shape) + img_prompts = img_prompts.reshape(B,4096,9).transpose(-2,-1) + output = torch.cat([self.base_prompts.expand(B,-1,-1), img_prompts], dim=1) + return output \ No newline at end of file diff --git a/model/ImageBind/CODE_OF_CONDUCT.md b/model/ImageBind/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..f913b6a55a6c5ab6e1224e11fc039c3d4c3b6283 --- /dev/null +++ b/model/ImageBind/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq \ No newline at end of file diff --git a/model/ImageBind/CONTRIBUTING.md b/model/ImageBind/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..63d0b751e8a00b606ddff92e2524faa3c90a63b0 --- /dev/null +++ b/model/ImageBind/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to ImageBind +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. 
+ +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Meta's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to Omnivore, you agree that your contributions will be licensed +under the [LICENSE](LICENSE) file in the root directory of this source tree. diff --git a/model/ImageBind/LICENSE b/model/ImageBind/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..bfef380bf7d9cb74ec9ba533b37c3fbeef3bdc09 --- /dev/null +++ b/model/ImageBind/LICENSE @@ -0,0 +1,437 @@ +Attribution-NonCommercial-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. 
If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International +Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial-ShareAlike 4.0 International Public License +("Public License"). To the extent this Public License may be +interpreted as a contract, You are granted the Licensed Rights in +consideration of Your acceptance of these terms and conditions, and the +Licensor grants You such rights in consideration of benefits the +Licensor receives from making the Licensed Material available under +these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-NC-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. 
The License Elements of this + Public License are Attribution, NonCommercial, and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + l. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + m. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + n. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. 
Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + 3. 
If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-NC-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + including for purposes of Section 3(b); and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. 
This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. 
+ +Creative Commons may be contacted at creativecommons.org. \ No newline at end of file diff --git a/model/ImageBind/README.md b/model/ImageBind/README.md new file mode 100644 index 0000000000000000000000000000000000000000..028fa988bb6cd9843aec9454636e1541b53680e7 --- /dev/null +++ b/model/ImageBind/README.md @@ -0,0 +1,155 @@ +# ImageBind: One Embedding Space To Bind Them All + +**[FAIR, Meta AI](https://ai.facebook.com/research/)** + +Rohit Girdhar*, +Alaaeldin El-Nouby*, +Zhuang Liu, +Mannat Singh, +Kalyan Vasudev Alwala, +Armand Joulin, +Ishan Misra* + +To appear at CVPR 2023 (*Highlighted paper*) + +[[`Paper`](https://facebookresearch.github.io/ImageBind/paper)] [[`Blog`](https://ai.facebook.com/blog/imagebind-six-modalities-binding-ai/)] [[`Demo`](https://imagebind.metademolab.com/)] [[`Supplementary Video`](https://dl.fbaipublicfiles.com/imagebind/imagebind_video.mp4)] [[`BibTex`](#citing-imagebind)] + +PyTorch implementation and pretrained models for ImageBind. For details, see the paper: **[ImageBind: One Embedding Space To Bind Them All](https://facebookresearch.github.io/ImageBind/paper)**. + +ImageBind learns a joint embedding across six different modalities - images, text, audio, depth, thermal, and IMU data. It enables novel emergent applications ‘out-of-the-box’ including cross-modal retrieval, composing modalities with arithmetic, cross-modal detection and generation. + + + +![ImageBind](https://user-images.githubusercontent.com/8495451/236859695-ffa13364-3e39-4d99-a8da-fbfab17f9a6b.gif) + +## ImageBind model + +Emergent zero-shot classification performance. + + + + + + + + + + + + + + + + + + + + + + + +
| Model | IN1k | K400 | NYU-D | ESC | LLVIP | Ego4D | download |
| --- | --- | --- | --- | --- | --- | --- | --- |
| imagebind_huge | 77.7 | 50.0 | 54.0 | 66.9 | 63.4 | 25.0 | [checkpoint](https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth) |
+ +## Usage + +Install pytorch 1.13+ and other 3rd party dependencies. + +```shell +conda create --name imagebind python=3.8 -y +conda activate imagebind + +pip install -r requirements.txt +``` + +For windows users, you might need to install `soundfile` for reading/writing audio files. (Thanks @congyue1977) + +``` +pip install soundfile +``` + + +Extract and compare features across modalities (e.g. Image, Text and Audio). + +```python +import data +import torch +from models import imagebind_model +from models.imagebind_model import ModalityType + +text_list=["A dog.", "A car", "A bird"] +image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"] +audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"] + +device = "cuda:0" if torch.cuda.is_available() else "cpu" + +# Instantiate model +model = imagebind_model.imagebind_huge(pretrained=True) +model.eval() +model.to(device) + +# Load data +inputs = { + ModalityType.TEXT: data.load_and_transform_text(text_list, device), + ModalityType.VISION: data.load_and_transform_vision_data(image_paths, device), + ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, device), +} + +with torch.no_grad(): + embeddings = model(inputs) + +print( + "Vision x Text: ", + torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1), +) +print( + "Audio x Text: ", + torch.softmax(embeddings[ModalityType.AUDIO] @ embeddings[ModalityType.TEXT].T, dim=-1), +) +print( + "Vision x Audio: ", + torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=-1), +) + +# Expected output: +# +# Vision x Text: +# tensor([[9.9761e-01, 2.3694e-03, 1.8612e-05], +# [3.3836e-05, 9.9994e-01, 2.4118e-05], +# [4.7997e-05, 1.3496e-02, 9.8646e-01]]) +# +# Audio x Text: +# tensor([[1., 0., 0.], +# [0., 1., 0.], +# [0., 0., 1.]]) +# +# Vision x Audio: +# tensor([[0.8070, 0.1088, 0.0842], +# [0.1036, 0.7884, 0.1079], +# [0.0018, 0.0022, 0.9960]]) + +``` + +## Model card +Please see the [model card](model_card.md) for details. + +## License + +ImageBind code and model weights are released under the CC-BY-NC 4.0 license. See [LICENSE](LICENSE) for additional details. + +## Contributing + +See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md). 
+ +## Citing ImageBind + +If you find this repository useful, please consider giving a star :star: and citation + +``` +@inproceedings{girdhar2023imagebind, + title={ImageBind: One Embedding Space To Bind Them All}, + author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang +and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan}, + booktitle={CVPR}, + year={2023} +} +``` diff --git a/model/ImageBind/__init__.py b/model/ImageBind/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d872d0725710d6dde3af3b6e05382922f074338b --- /dev/null +++ b/model/ImageBind/__init__.py @@ -0,0 +1,2 @@ +from .models import imagebind_model +from .models.imagebind_model import ModalityType diff --git a/model/ImageBind/bpe/bpe_simple_vocab_16e6.txt.gz b/model/ImageBind/bpe/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/model/ImageBind/bpe/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/model/ImageBind/data.py b/model/ImageBind/data.py new file mode 100644 index 0000000000000000000000000000000000000000..f0af53db0b810edfc4ab9ae113db71d9704de080 --- /dev/null +++ b/model/ImageBind/data.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import torch +import torch.nn as nn +import torchaudio +import logging + +from .models.multimodal_preprocessors import SimpleTokenizer +from PIL import Image +from pytorchvideo import transforms as pv_transforms +from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler +from pytorchvideo.data.encoded_video import EncodedVideo + +from torchvision import transforms +from torchvision.transforms._transforms_video import NormalizeVideo + +DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds + +BPE_PATH = "/data/guzhaopeng/PandaGPT/code/model/ImageBind/bpe/bpe_simple_vocab_16e6.txt.gz" + + +def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): + # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 + waveform -= waveform.mean() + fbank = torchaudio.compliance.kaldi.fbank( + waveform, + htk_compat=True, + sample_frequency=sample_rate, + use_energy=False, + window_type="hanning", + num_mel_bins=num_mel_bins, + dither=0.0, + frame_length=25, + frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, + ) + # Convert to [mel_bins, num_frames] shape + fbank = fbank.transpose(0, 1) + # Pad to target_length + n_frames = fbank.size(1) + p = target_length - n_frames + # if p is too large (say >20%), flash a warning + if abs(p) / n_frames > 0.2: + logging.warning( + "Large gap between audio n_frames(%d) and " + "target_length (%d). 
Is the audio_target_length " + "setting correct?", + n_frames, + target_length, + ) + # cut and pad + if p > 0: + fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) + elif p < 0: + fbank = fbank[:, 0:target_length] + # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 + # channel image + fbank = fbank.unsqueeze(0) + return fbank + + +def get_clip_timepoints(clip_sampler, duration): + # Read out all clips in this video + all_clips_timepoints = [] + is_last_clip = False + end = 0.0 + while not is_last_clip: + start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) + all_clips_timepoints.append((start, end)) + return all_clips_timepoints + + +def load_and_transform_vision_data(image_paths, device): + if image_paths is None: + return None + + image_ouputs = [] + for image_path in image_paths: + data_transform = transforms.Compose( + [ + transforms.Resize( + 224, interpolation=transforms.InterpolationMode.BICUBIC + ), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + with open(image_path, "rb") as fopen: + image = Image.open(fopen).convert("RGB") + + image = data_transform(image).to(device) + image_ouputs.append(image) + return torch.stack(image_ouputs, dim=0) + + +def load_and_transform_vision_data_for_web_demo(image_paths, device): + if image_paths is None: + return None + + image_ouputs = [] + for image_path in image_paths: + data_transform = transforms.Compose( + [ + transforms.Resize( + (224,224), interpolation=transforms.InterpolationMode.BICUBIC + ), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + with open(image_path, "rb") as fopen: + image = Image.open(fopen).convert("RGB") + + image = data_transform(image).to(device) + image_ouputs.append(image) + return torch.stack(image_ouputs, dim=0) + + +def load_and_transform_thermal_data(thermal_paths, device): + if thermal_paths is None: + return None + + thermal_ouputs = [] + for thermal_path in thermal_paths: + data_transform = transforms.Compose( + [ + transforms.Resize( + 224, interpolation=transforms.InterpolationMode.BICUBIC + ), + transforms.CenterCrop(224), + transforms.ToTensor(), + ] + ) + with open(thermal_path, "rb") as fopen: + thermal = Image.open(fopen).convert("L") + thermal = data_transform(thermal).to(device) + thermal_ouputs.append(thermal) + return torch.stack(thermal_ouputs, dim=0) + + +def load_and_transform_text(text, device): + if text is None: + return None + tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) + tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] + tokens = torch.cat(tokens, dim=0) + return tokens + + +def load_and_transform_audio_data( + audio_paths, + device, + num_mel_bins=128, + target_length=204, + sample_rate=16000, + clip_duration=2, + clips_per_video=3, + mean=-4.268, + std=9.138, +): + if audio_paths is None: + return None + + audio_outputs = [] + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + + for audio_path in audio_paths: + waveform, sr = torchaudio.load(audio_path) + if sample_rate != sr: + waveform = torchaudio.functional.resample( + waveform, orig_freq=sr, new_freq=sample_rate + ) + all_clips_timepoints = get_clip_timepoints( + clip_sampler, waveform.size(1) / sample_rate + ) + all_clips = [] + 
for clip_timepoints in all_clips_timepoints: + waveform_clip = waveform[ + :, + int(clip_timepoints[0] * sample_rate) : int( + clip_timepoints[1] * sample_rate + ), + ] + waveform_melspec = waveform2melspec( + waveform_clip, sample_rate, num_mel_bins, target_length + ) + all_clips.append(waveform_melspec) + + normalize = transforms.Normalize(mean=mean, std=std) + all_clips = [normalize(ac).to(device) for ac in all_clips] + + all_clips = torch.stack(all_clips, dim=0) + audio_outputs.append(all_clips) + + return torch.stack(audio_outputs, dim=0) + + +def get_clip_timepoints(clip_sampler, duration): + # Read out all clips in this video + all_clips_timepoints = [] + is_last_clip = False + end = 0.0 + while not is_last_clip: + start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) + all_clips_timepoints.append((start, end)) + return all_clips_timepoints + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Peform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to peform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): + """ + Perform uniform spatial sampling on the images and corresponding boxes. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + scale_size (int): optinal. If not None, resize the images to scale_size before + performing any crop. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] + cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +class SpatialCrop(nn.Module): + """ + Convert the video into 3 smaller clips spatially. 
Must be used after the + temporal crops to get spatial crops, and should be used with + -2 in the spatial crop at the slowfast augmentation stage (so full + frames are passed in here). Will return a larger list with the + 3x spatial crops as well. + """ + + def __init__(self, crop_size: int = 224, num_crops: int = 3): + super().__init__() + self.crop_size = crop_size + if num_crops == 3: + self.crops_to_ext = [0, 1, 2] + self.flipped_crops_to_ext = [] + elif num_crops == 1: + self.crops_to_ext = [1] + self.flipped_crops_to_ext = [] + else: + raise NotImplementedError("Nothing else supported yet") + + def forward(self, videos): + """ + Args: + videos: A list of C, T, H, W videos. + Returns: + videos: A list with 3x the number of elements. Each video converted + to C, T, H', W' by spatial cropping. + """ + assert isinstance(videos, list), "Must be a list of videos after temporal crops" + assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" + res = [] + for video in videos: + for spatial_idx in self.crops_to_ext: + res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) + if not self.flipped_crops_to_ext: + continue + flipped_video = transforms.functional.hflip(video) + for spatial_idx in self.flipped_crops_to_ext: + res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) + return res + + +def load_and_transform_video_data( + video_paths, + device, + clip_duration=2, + clips_per_video=5, + sample_rate=16000, +): + if video_paths is None: + return None + + video_outputs = [] + video_transform = transforms.Compose( + [ + pv_transforms.ShortSideScale(224), + NormalizeVideo( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) + + for video_path in video_paths: + video = EncodedVideo.from_path( + video_path, + decoder="decord", + decode_audio=False, + **{"sample_rate": sample_rate}, + ) + + all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) + + all_video = [] + for clip_timepoints in all_clips_timepoints: + # Read the clip, get frames + clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) + if clip is None: + raise ValueError("No clip found") + video_clip = frame_sampler(clip["video"]) + video_clip = video_clip / 255.0 # since this is float, need 0-1 + + all_video.append(video_clip) + + all_video = [video_transform(clip) for clip in all_video] + all_video = SpatialCrop(224, num_crops=3)(all_video) + + all_video = torch.stack(all_video, dim=0) + video_outputs.append(all_video) + + return torch.stack(video_outputs, dim=0).to(device) diff --git a/model/ImageBind/model_card.md b/model/ImageBind/model_card.md new file mode 100644 index 0000000000000000000000000000000000000000..c7bb26500b6590b64ffa6350f37be80dc88612d8 --- /dev/null +++ b/model/ImageBind/model_card.md @@ -0,0 +1,94 @@ +# Model Card for ImageBind + +Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images. +Input any of the six modalities and get the same sized embedding that can be used for cross-modal and multimodal tasks. 
+ +# Model Details + +## Model Description + + +Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images + +- **Developed by:** Meta AI +- **Model type:** Multimodal model +- **Language(s) (NLP):** en +- **License:** CC BY-NC-SA 4.0 +- **Resources for more information:** + - [GitHub Repo](https://github.com/facebookresearch/ImageBind) + + +# Uses + + +This model is intended only for research purposes. It provides a joint embedding space for different modalities -- image/video, text, audio, depth, IMU and thermal images. +We hope that these joint embeddings can be used for a variety of different cross-modal research, e.g., cross-modal retrieval and combining embeddings from different modalities. + +## Out-of-Scope Use + + + + +This model is *NOT* intended to be used in any real world application -- commercial or otherwise. +It may produce harmful associations with different inputs. +The model needs to be investigated and likely re-trained on specific data for any such application. +The model is expected to work better on web-based visual data since it was trained on such data. +The text encoder is likely to work only on English language text because of the underlying training datasets. + +# Bias, Risks, and Limitations + + +Open-domain joint embedding models are prone to producing specific biases, e.g., study from [CLIP](https://github.com/openai/CLIP/blob/main/model-card.md#bias-and-fairness). +Since our model uses such models as initialization, it will exhibit such biases too. +Moreover, for learning joint embeddings for other modalities such as audio, thermal, depth, and IMU we leverage datasets that are relatively small. These joint embeddings are thus limited to the concepts present in the datasets. For example, the thermal datasets we used are limited to outdoor street scenes, while the depth datasets are limited to indoor scenes. + + + +# Training Details + +## Training Data + + + +ImageBind uses image-paired data for training -- (image, X) where X is one of text, audio, depth, IMU or thermal data. +In particular, we initialize and freeze the image and text encoders using an OpenCLIP ViT-H encoder. +We train audio embeddings using Audioset, depth embeddings using the SUN RGB-D dataset, IMU using the Ego4D dataset and thermal embeddings using the LLVIP dataset. +We provide the exact training data details in the paper. + + +## Training Procedure + + +Please refer to the research paper and github repo for exact details on this. + +# Evaluation + +## Testing Data, Factors & Metrics + +We evaluate the model on a variety of different classification benchmarks for each modality. +The evaluation details are presented in the paper. +The models performance is measured using standard classification metrics such as accuracy and mAP. + +# Citation + + + +**BibTeX:** +``` +@inproceedings{girdhar2023imagebind, + title={ImageBind: One Embedding Space To Bind Them All}, + author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang +and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan}, + booktitle={CVPR}, + year={2023} +} +``` + + +# Model Card Contact + +Please reach out to the authors at: rgirdhar@meta.com imisra@meta.com alaaelnouby@gmail.com + +# How to Get Started with the Model + +Our github repo provides a simple example to extract embeddings from images, audio etc. 
diff --git a/model/ImageBind/models/__init__.py b/model/ImageBind/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/model/ImageBind/models/__pycache__/__init__.cpython-38.pyc b/model/ImageBind/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3373b55e48fc47cf6b7ba4c3eec637b749a88a9 Binary files /dev/null and b/model/ImageBind/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/model/ImageBind/models/__pycache__/helpers.cpython-38.pyc b/model/ImageBind/models/__pycache__/helpers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17da286902b09c0febfbf413224f08d5e42bf727 Binary files /dev/null and b/model/ImageBind/models/__pycache__/helpers.cpython-38.pyc differ diff --git a/model/ImageBind/models/__pycache__/imagebind_model.cpython-38.pyc b/model/ImageBind/models/__pycache__/imagebind_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53f528bf801a2e3cc539149a4e634a9ee6bb780b Binary files /dev/null and b/model/ImageBind/models/__pycache__/imagebind_model.cpython-38.pyc differ diff --git a/model/ImageBind/models/__pycache__/multimodal_preprocessors.cpython-38.pyc b/model/ImageBind/models/__pycache__/multimodal_preprocessors.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e388fc0168a83f5dceaab99eef096bddc56a8c05 Binary files /dev/null and b/model/ImageBind/models/__pycache__/multimodal_preprocessors.cpython-38.pyc differ diff --git a/model/ImageBind/models/__pycache__/transformer.cpython-38.pyc b/model/ImageBind/models/__pycache__/transformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ba6d5087e18cdaf5259b6ca5305ee24712ad04b Binary files /dev/null and b/model/ImageBind/models/__pycache__/transformer.cpython-38.pyc differ diff --git a/model/ImageBind/models/helpers.py b/model/ImageBind/models/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..75ef564d98f58f4135c19d0bfaeaddbc8137a00a --- /dev/null +++ b/model/ImageBind/models/helpers.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import einops +import numpy as np +import torch +import torch.nn as nn + + +class Normalize(nn.Module): + def __init__(self, dim: int) -> None: + super().__init__() + self.dim = dim + + def forward(self, x): + return torch.nn.functional.normalize(x, dim=self.dim, p=2) + + +class LearnableLogitScaling(nn.Module): + def __init__( + self, + logit_scale_init: float = 1 / 0.07, + learnable: bool = True, + max_logit_scale: float = 100, + ) -> None: + super().__init__() + self.max_logit_scale = max_logit_scale + self.logit_scale_init = logit_scale_init + self.learnable = learnable + log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) + if learnable: + self.log_logit_scale = nn.Parameter(log_logit_scale) + else: + self.register_buffer("log_logit_scale", log_logit_scale) + + def forward(self, x): + return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x + + def extra_repr(self): + st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ + f" max_logit_scale={self.max_logit_scale}" + return st + + +class EinOpsRearrange(nn.Module): + def __init__(self, rearrange_expr: str, **kwargs) -> None: + super().__init__() + self.rearrange_expr = rearrange_expr + self.kwargs = kwargs + + def forward(self, x): + assert isinstance(x, torch.Tensor) + return einops.rearrange(x, self.rearrange_expr, **self.kwargs) + + +class VerboseNNModule(nn.Module): + """ + Wrapper around nn.Module that prints registered buffers and parameter names. + """ + + @staticmethod + def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: + st = ( + "(" + + name + + "): " + + "tensor(" + + str(tuple(tensor[1].shape)) + + ", requires_grad=" + + str(tensor[1].requires_grad) + + ")\n" + ) + return st + + def extra_repr(self) -> str: + named_modules = set() + for p in self.named_modules(): + named_modules.update([p[0]]) + named_modules = list(named_modules) + + string_repr = "" + for p in self.named_parameters(): + name = p[0].split(".")[0] + if name not in named_modules: + string_repr += self.get_readable_tensor_repr(name, p) + + for p in self.named_buffers(): + name = p[0].split(".")[0] + string_repr += self.get_readable_tensor_repr(name, p) + + return string_repr + + +def cast_if_src_dtype( + tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype +): + updated = False + if tensor.dtype == src_dtype: + tensor = tensor.to(dtype=tgt_dtype) + updated = True + return tensor, updated + + +class QuickGELU(nn.Module): + # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class SelectElement(nn.Module): + def __init__(self, index) -> None: + super().__init__() + self.index = index + + def forward(self, x): + assert x.ndim >= 3 + return x[:, self.index, ...] 
+ +class SelectEOSAndProject(nn.Module): + """ + Text Pooling used in OpenCLIP + """ + + def __init__(self, proj: nn.Module) -> None: + super().__init__() + self.proj = proj + + def forward(self, x, seq_len): + assert x.ndim == 3 + # x is of shape B x L x D + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), seq_len] + x = self.proj(x) + return x diff --git a/model/ImageBind/models/imagebind_model.py b/model/ImageBind/models/imagebind_model.py new file mode 100644 index 0000000000000000000000000000000000000000..1142cc15571830f4d148db8f8cf85f47e0b4a6bb --- /dev/null +++ b/model/ImageBind/models/imagebind_model.py @@ -0,0 +1,527 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import os +from functools import partial +from types import SimpleNamespace + +import torch +import torch.nn as nn +# from pytorch_lightning.utilities import rank_zero_only +from .helpers import (EinOpsRearrange, LearnableLogitScaling, Normalize, + SelectElement, SelectEOSAndProject) +from .multimodal_preprocessors import (AudioPreprocessor, + IMUPreprocessor, PadIm2Video, + PatchEmbedGeneric, + RGBDTPreprocessor, + SpatioTemporalPosEmbeddingHelper, + TextPreprocessor, + ThermalPreprocessor) +from .transformer import MultiheadAttention, SimpleTransformer + +ModalityType = SimpleNamespace( + VISION="vision", + TEXT="text", + AUDIO="audio", + THERMAL="thermal", + DEPTH="depth", + IMU="imu", + POINT="point", +) + + +class ImageBindModel(nn.Module): + def __init__( + self, + video_frames=2, + kernel_size=(2, 14, 14), + audio_kernel_size=16, + audio_stride=10, + out_embed_dim=768, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_num_mel_bins=128, + audio_target_len=204, + audio_drop_path=0.1, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + depth_embed_dim=384, + depth_kernel_size=16, + depth_num_blocks=12, + depth_num_heads=8, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_kernel_size=16, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_kernel_size=8, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + layers = [7,15,23,31] + ): + super().__init__() + + self.out_layers = layers + + self.modality_preprocessors = self._create_modality_preprocessors( + video_frames, + vision_embed_dim, + kernel_size, + text_embed_dim, + audio_embed_dim, + audio_kernel_size, + audio_stride, + audio_num_mel_bins, + audio_target_len, + depth_embed_dim, + depth_kernel_size, + thermal_embed_dim, + thermal_kernel_size, + imu_embed_dim, + ) + + self.modality_trunks = self._create_modality_trunks( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + text_embed_dim, + text_num_blocks, + text_num_heads, + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + audio_drop_path, + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + depth_drop_path, + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + thermal_drop_path, + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + imu_drop_path, + ) + + self.modality_heads = self._create_modality_heads( + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + 
imu_embed_dim, + ) + + self.modality_postprocessors = self._create_modality_postprocessors( + out_embed_dim + ) + + + def _create_modality_preprocessors( + self, + video_frames=2, + vision_embed_dim=1024, + kernel_size=(2, 14, 14), + text_embed_dim=768, + audio_embed_dim=768, + audio_kernel_size=16, + audio_stride=10, + audio_num_mel_bins=128, + audio_target_len=204, + depth_embed_dim=768, + depth_kernel_size=16, + thermal_embed_dim=768, + thermal_kernel_size=16, + imu_embed_dim=512, + ): + rgbt_stem = PatchEmbedGeneric( + proj_stem=[ + PadIm2Video(pad_type="repeat", ntimes=2), + nn.Conv3d( + in_channels=3, + kernel_size=kernel_size, + out_channels=vision_embed_dim, + stride=kernel_size, + bias=False, + ), + ] + ) + rgbt_preprocessor = RGBDTPreprocessor( + img_size=[3, video_frames, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=rgbt_stem, + depth_stem=None, + ) + + text_preprocessor = TextPreprocessor( + context_length=77, + vocab_size=49408, + embed_dim=text_embed_dim, + causal_masking=True, + ) + + audio_stem = PatchEmbedGeneric( + proj_stem=[ + nn.Conv2d( + in_channels=1, + kernel_size=audio_kernel_size, + stride=audio_stride, + out_channels=audio_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), + ) + audio_preprocessor = AudioPreprocessor( + img_size=[1, audio_num_mel_bins, audio_target_len], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + audio_stem=audio_stem, + ) + + depth_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=depth_kernel_size, + in_channels=1, + out_channels=depth_embed_dim, + stride=depth_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), + ) + + depth_preprocessor = RGBDTPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + rgbt_stem=None, + depth_stem=depth_stem, + ) + + thermal_stem = PatchEmbedGeneric( + [ + nn.Conv2d( + kernel_size=thermal_kernel_size, + in_channels=1, + out_channels=thermal_embed_dim, + stride=thermal_kernel_size, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), + ) + thermal_preprocessor = ThermalPreprocessor( + img_size=[1, 224, 224], + num_cls_tokens=1, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + thermal_stem=thermal_stem, + ) + + imu_stem = PatchEmbedGeneric( + [ + nn.Linear( + in_features=48, + out_features=imu_embed_dim, + bias=False, + ), + ], + norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), + ) + + imu_preprocessor = IMUPreprocessor( + img_size=[6, 2000], + num_cls_tokens=1, + kernel_size=8, + embed_dim=imu_embed_dim, + pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), + imu_stem=imu_stem, + ) + + modality_preprocessors = { + ModalityType.VISION: rgbt_preprocessor, + ModalityType.TEXT: text_preprocessor, + ModalityType.AUDIO: audio_preprocessor, + ModalityType.DEPTH: depth_preprocessor, + ModalityType.THERMAL: thermal_preprocessor, + ModalityType.IMU: imu_preprocessor, + } + + return nn.ModuleDict(modality_preprocessors) + + def _create_modality_trunks( + self, + vision_embed_dim=1024, + vision_num_blocks=24, + vision_num_heads=16, + text_embed_dim=768, + text_num_blocks=12, + text_num_heads=12, + audio_embed_dim=768, + audio_num_blocks=12, + audio_num_heads=12, + audio_drop_path=0.0, + depth_embed_dim=768, + depth_num_blocks=12, + 
depth_num_heads=12, + depth_drop_path=0.0, + thermal_embed_dim=768, + thermal_num_blocks=12, + thermal_num_heads=12, + thermal_drop_path=0.0, + imu_embed_dim=512, + imu_num_blocks=6, + imu_num_heads=8, + imu_drop_path=0.7, + ): + def instantiate_trunk( + embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path + ): + return SimpleTransformer( + embed_dim=embed_dim, + num_blocks=num_blocks, + ffn_dropout_rate=0.0, + drop_path_rate=drop_path, + attn_target=partial( + MultiheadAttention, + embed_dim=embed_dim, + num_heads=num_heads, + bias=True, + add_bias_kv=add_bias_kv, + ), + pre_transformer_layer=nn.Sequential( + nn.LayerNorm(embed_dim, eps=1e-6) + if pre_transformer_ln + else nn.Identity(), + EinOpsRearrange("b l d -> l b d"), + ), + post_transformer_layer=EinOpsRearrange("l b d -> b l d"), + ) + + modality_trunks = {} + modality_trunks[ModalityType.VISION] = instantiate_trunk( + vision_embed_dim, + vision_num_blocks, + vision_num_heads, + pre_transformer_ln=True, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.TEXT] = instantiate_trunk( + text_embed_dim, + text_num_blocks, + text_num_heads, + pre_transformer_ln=False, + add_bias_kv=False, + drop_path=0.0, + ) + modality_trunks[ModalityType.AUDIO] = instantiate_trunk( + audio_embed_dim, + audio_num_blocks, + audio_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=audio_drop_path, + ) + modality_trunks[ModalityType.DEPTH] = instantiate_trunk( + depth_embed_dim, + depth_num_blocks, + depth_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=depth_drop_path, + ) + modality_trunks[ModalityType.THERMAL] = instantiate_trunk( + thermal_embed_dim, + thermal_num_blocks, + thermal_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=thermal_drop_path, + ) + modality_trunks[ModalityType.IMU] = instantiate_trunk( + imu_embed_dim, + imu_num_blocks, + imu_num_heads, + pre_transformer_ln=False, + add_bias_kv=True, + drop_path=imu_drop_path, + ) + + return nn.ModuleDict(modality_trunks) + + def _create_modality_heads( + self, + out_embed_dim, + vision_embed_dim, + text_embed_dim, + audio_embed_dim, + depth_embed_dim, + thermal_embed_dim, + imu_embed_dim, + ): + modality_heads = {} + + modality_heads[ModalityType.VISION] = nn.Sequential( + nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(vision_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.TEXT] = SelectEOSAndProject( + proj=nn.Sequential( + nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), + nn.Linear(text_embed_dim, out_embed_dim, bias=False), + ) + ) + + modality_heads[ModalityType.AUDIO] = nn.Sequential( + nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(audio_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.DEPTH] = nn.Sequential( + nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(depth_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.THERMAL] = nn.Sequential( + nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), + ) + + modality_heads[ModalityType.IMU] = nn.Sequential( + nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), + SelectElement(index=0), + nn.Dropout(p=0.5), + nn.Linear(imu_embed_dim, out_embed_dim, bias=False), + ) + + return nn.ModuleDict(modality_heads) + + def 
_create_modality_postprocessors(self, out_embed_dim): + modality_postprocessors = {} + + modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) + modality_postprocessors[ModalityType.TEXT] = nn.Sequential( + Normalize(dim=-1), LearnableLogitScaling(learnable=True) + ) + modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=20.0, learnable=False), + ) + modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=10.0, learnable=False), + ) + modality_postprocessors[ModalityType.IMU] = nn.Sequential( + Normalize(dim=-1), + LearnableLogitScaling(logit_scale_init=5.0, learnable=False), + ) + + return nn.ModuleDict(modality_postprocessors) + + def forward(self, inputs): + outputs = {} + for modality_key, modality_value in inputs.items(): + reduce_list = ( + modality_value.ndim >= 5 + ) # Audio and Video inputs consist of multiple clips + if reduce_list: + B, S = modality_value.shape[:2] + modality_value = modality_value.reshape( + B * S, *modality_value.shape[2:] + ) + + if modality_value is not None: + modality_value = self.modality_preprocessors[modality_key]( + **{modality_key: modality_value} + ) + trunk_inputs = modality_value["trunk"] + head_inputs = modality_value["head"] + + modality_value, modality_full_value = self.modality_trunks[modality_key](**trunk_inputs, out_layers=self.out_layers) + + + modality_value = self.modality_heads[modality_key]( + modality_value, **head_inputs + ) + modality_value = self.modality_postprocessors[modality_key]( + modality_value + ) + + if reduce_list: + modality_value = modality_value.reshape(B, S, -1) + modality_value = modality_value.mean(dim=1) + + outputs[modality_key] = modality_value, modality_full_value + + return outputs + + +def imagebind_huge(args): + + if 'layers' in args: + layers = args['layers'] + else: + layers = [7,15,23,31] + + return ImageBindModel( + vision_embed_dim=1280, + vision_num_blocks=32, + vision_num_heads=16, + text_embed_dim=1024, + text_num_blocks=24, + text_num_heads=16, + out_embed_dim=1024, + audio_drop_path=0.1, + imu_drop_path=0.7, + layers = layers + ), 1024 + + +def save_module(module_dict: nn.ModuleDict, module_name: str = "", + checkpoint_dir: str = "./.checkpoints/full", postfix: str = "_last", + extension: str = "pth"): + try: + torch.save(module_dict.state_dict(), + os.path.join(checkpoint_dir, f"imagebind-{module_name}{postfix}.{extension}")) + logging.info(f"Saved parameters for module {module_name} to {checkpoint_dir}.") + except FileNotFoundError: + logging.warning(f"Could not save module parameters for {module_name} to {checkpoint_dir}.") + + +def load_module(module_dict: nn.ModuleDict, module_name: str = "", + checkpoint_dir: str = "./.checkpoints/full", postfix: str = "_last", + extension: str = "pth"): + try: + module_dict.load_state_dict(torch.load( + os.path.join(checkpoint_dir, f"imagebind-{module_name}{postfix}.{extension}")), strict=False) + logging.info(f"Loaded parameters for module {module_name} from {checkpoint_dir}.") + except FileNotFoundError: + logging.warning(f"Could not load module parameters for {module_name} from {checkpoint_dir}.") \ No newline at end of file diff --git a/model/ImageBind/models/multimodal_preprocessors.py b/model/ImageBind/models/multimodal_preprocessors.py new file mode 100644 
index 0000000000000000000000000000000000000000..768c5b9c4f3f9b17b04ee41fec7ca2d99c15335e --- /dev/null +++ b/model/ImageBind/models/multimodal_preprocessors.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import gzip +import html +import io +import math +from functools import lru_cache +from typing import Callable, List, Optional, Tuple + +import ftfy +import numpy as np +import regex as re +import torch +import torch.nn as nn +from iopath.common.file_io import g_pathmgr +from timm.models.layers import trunc_normal_ + +from .helpers import VerboseNNModule, cast_if_src_dtype + + +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): + N = pos_embed.shape[1] + if N == target_spatial_size: + return pos_embed + dim = pos_embed.shape[-1] + # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 + pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) + pos_embed = nn.functional.interpolate( + pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( + 0, 3, 1, 2 + ), + scale_factor=math.sqrt(target_spatial_size / N), + mode="bicubic", + ) + if updated: + pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) + pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return pos_embed + + +def interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=None, + first_patch_idx=1, +): + assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" + N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists + if npatch_per_img == N: + return pos_embed + + assert ( + patches_layout[-1] == patches_layout[-2] + ), "Interpolation of pos embed not supported for non-square layouts" + + class_emb = pos_embed[:, :first_patch_idx] + pos_embed = pos_embed[:, first_patch_idx:] + + if input_shape is None or patches_layout[0] == 1: + # simple 2D pos embedding, no temporal component + pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) + elif patches_layout[0] > 1: + # pos embed has a temporal component + assert len(input_shape) == 4, "temporal interpolation not supported" + # we only support 2D interpolation in this case + num_frames = patches_layout[0] + num_spatial_tokens = patches_layout[1] * patches_layout[2] + pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) + # interpolate embedding for zeroth frame + pos_embed = interpolate_pos_encoding_2d( + npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) + ) + else: + raise ValueError("This type of interpolation isn't implemented") + + return torch.cat((class_emb, pos_embed), dim=1) + + +def _get_pos_embedding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape, + first_patch_idx=1, 
+): + pos_embed = interpolate_pos_encoding( + npatch_per_img, + pos_embed, + patches_layout, + input_shape=input_shape, + first_patch_idx=first_patch_idx, + ) + return pos_embed + + +class PatchEmbedGeneric(nn.Module): + """ + PatchEmbed from Hydra + """ + + def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): + super().__init__() + + if len(proj_stem) > 1: + self.proj = nn.Sequential(*proj_stem) + else: + # Special case to be able to load pre-trained models that were + # trained with a standard stem + self.proj = proj_stem[0] + self.norm_layer = norm_layer + + def get_patch_layout(self, img_size): + with torch.no_grad(): + dummy_img = torch.zeros( + [ + 1, + ] + + img_size + ) + dummy_out = self.proj(dummy_img) + embed_dim = dummy_out.shape[1] + patches_layout = tuple(dummy_out.shape[2:]) + num_patches = np.prod(patches_layout) + return patches_layout, num_patches, embed_dim + + def forward(self, x): + x = self.proj(x) + # B C (T) H W -> B (T)HW C + x = x.flatten(2).transpose(1, 2) + if self.norm_layer is not None: + x = self.norm_layer(x) + return x + + +class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): + def __init__( + self, + patches_layout: List, + num_patches: int, + num_cls_tokens: int, + embed_dim: int, + learnable: bool, + ) -> None: + super().__init__() + self.num_cls_tokens = num_cls_tokens + self.patches_layout = patches_layout + self.num_patches = num_patches + self.num_tokens = num_cls_tokens + num_patches + self.learnable = learnable + if self.learnable: + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) + trunc_normal_(self.pos_embed, std=0.02) + else: + self.register_buffer( + "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) + ) + + def get_pos_embedding(self, vision_input, all_vision_tokens): + input_shape = vision_input.shape + pos_embed = _get_pos_embedding( + all_vision_tokens.size(1) - self.num_cls_tokens, + pos_embed=self.pos_embed, + patches_layout=self.patches_layout, + input_shape=input_shape, + first_patch_idx=self.num_cls_tokens, + ) + return pos_embed + + +class RGBDTPreprocessor(VerboseNNModule): + def __init__( + self, + rgbt_stem: PatchEmbedGeneric, + depth_stem: Optional[PatchEmbedGeneric], + img_size: Tuple = (3, 224, 224), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + use_type_embed: bool = False, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + stem = rgbt_stem if rgbt_stem is not None else depth_stem + ( + self.patches_layout, + self.num_patches, + self.embed_dim, + ) = stem.get_patch_layout(img_size) + self.rgbt_stem = rgbt_stem + self.depth_stem = depth_stem + self.use_pos_embed = pos_embed_fn is not None + self.use_type_embed = use_type_embed + self.num_cls_tokens = num_cls_tokens + + if self.use_pos_embed: + self.pos_embedding_helper = pos_embed_fn( + patches_layout=self.patches_layout, + num_cls_tokens=num_cls_tokens, + num_patches=self.num_patches, + embed_dim=self.embed_dim, + ) + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + if self.use_type_embed: + self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.use_pos_embed: + nn.init.normal_(self.pos_embedding_helper.pos_embed) + self.pos_embedding_helper.pos_embed *= scale + + 
if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + if self.use_type_embed: + nn.init.normal_(self.type_embed) + + def tokenize_input_and_cls_pos(self, input, stem, mask): + # tokens is of shape B x L x D + tokens = stem(input) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) + tokens = tokens + pos_embed + if self.use_type_embed: + tokens = tokens + self.type_embed.expand(B, -1, -1) + return tokens + + def forward(self, vision=None, depth=None, patch_mask=None): + if patch_mask is not None: + raise NotImplementedError() + + if vision is not None: + vision_tokens = self.tokenize_input_and_cls_pos( + vision, self.rgbt_stem, patch_mask + ) + + if depth is not None: + depth_tokens = self.tokenize_input_and_cls_pos( + depth, self.depth_stem, patch_mask + ) + + # aggregate tokens + if vision is not None and depth is not None: + final_tokens = vision_tokens + depth_tokens + else: + final_tokens = vision_tokens if vision is not None else depth_tokens + return_dict = { + "trunk": { + "tokens": final_tokens, + }, + "head": {}, + } + return return_dict + + +class AudioPreprocessor(RGBDTPreprocessor): + def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) + + def forward(self, audio=None): + return super().forward(vision=audio) + + +class ThermalPreprocessor(RGBDTPreprocessor): + def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: + super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) + + def forward(self, thermal=None): + return super().forward(vision=thermal) + + +def build_causal_attention_mask(context_length): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(context_length, context_length, requires_grad=False) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + +class TextPreprocessor(VerboseNNModule): + def __init__( + self, + vocab_size: int, + context_length: int, + embed_dim: int, + causal_masking: bool, + supply_seq_len_to_head: bool = True, + num_cls_tokens: int = 0, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.vocab_size = vocab_size + self.context_length = context_length + self.token_embedding = nn.Embedding(vocab_size, embed_dim) + self.pos_embed = nn.Parameter( + torch.empty(1, self.context_length + num_cls_tokens, embed_dim) + ) + self.causal_masking = causal_masking + if self.causal_masking: + mask = build_causal_attention_mask(self.context_length) + # register the mask as a buffer so it can be moved to the right device + self.register_buffer("mask", mask) + + self.supply_seq_len_to_head = supply_seq_len_to_head + self.num_cls_tokens = num_cls_tokens + self.embed_dim = embed_dim + if num_cls_tokens > 0: + assert self.causal_masking is False, "Masking + CLS token isn't implemented" + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, embed_dim) + ) + + 
self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style="openclip"): + # OpenCLIP style initialization + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def forward(self, text): + # text tokens are of shape B x L x D + text_tokens = self.token_embedding(text) + # concat CLS tokens if any + if self.num_cls_tokens > 0: + B = text_tokens.shape[0] + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + text_tokens = torch.cat((class_tokens, text_tokens), dim=1) + text_tokens = text_tokens + self.pos_embed + return_dict = { + "trunk": { + "tokens": text_tokens, + }, + "head": {}, + } + # Compute sequence length after adding CLS tokens + if self.supply_seq_len_to_head: + text_lengths = text.argmax(dim=-1) + return_dict["head"] = { + "seq_len": text_lengths, + } + if self.causal_masking: + return_dict["trunk"].update({"attn_mask": self.mask}) + return return_dict + + +class Im2Video(nn.Module): + """Convert an image into a trivial video.""" + + def __init__(self, time_dim=2): + super().__init__() + self.time_dim = time_dim + + def forward(self, x): + if x.ndim == 4: + # B, C, H, W -> B, C, T, H, W + return x.unsqueeze(self.time_dim) + elif x.ndim == 5: + return x + else: + raise ValueError(f"Dimension incorrect {x.shape}") + + +class PadIm2Video(Im2Video): + def __init__(self, ntimes, pad_type, time_dim=2): + super().__init__(time_dim=time_dim) + assert ntimes > 0 + assert pad_type in ["zero", "repeat"] + self.ntimes = ntimes + self.pad_type = pad_type + + def forward(self, x): + x = super().forward(x) + if x.shape[self.time_dim] == 1: + if self.pad_type == "repeat": + new_shape = [1] * len(x.shape) + new_shape[self.time_dim] = self.ntimes + x = x.repeat(new_shape) + elif self.pad_type == "zero": + padarg = [0, 0] * len(x.shape) + padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] + x = nn.functional.pad(x, padarg) + return x + + +# Modified from github.com/openai/CLIP +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str, context_length=77): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + + with g_pathmgr.open(bpe_path, "rb") as fh: + bpe_bytes = io.BytesIO(fh.read()) + merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") + merges = merges[1 : 49152 - 256 - 2 + 1] + merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v + "" for v in vocab] + for merge in merges: + vocab.append("".join(merge)) + vocab.extend(["<|startoftext|>", "<|endoftext|>"]) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = { + "<|startoftext|>": "<|startoftext|>", + "<|endoftext|>": "<|endoftext|>", + } + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE, + ) + self.context_length = context_length + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + "",) + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) + bpe_tokens.extend( + self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") + ) + return bpe_tokens + + def decode(self, tokens): + text = "".join([self.decoder[token] for token in tokens]) + text = ( + bytearray([self.byte_decoder[c] for c in text]) + .decode("utf-8", errors="replace") + .replace("", " ") + ) + return text + + def __call__(self, texts, context_length=None): + if not context_length: + context_length = self.context_length + + if isinstance(texts, str): + texts = [texts] + + sot_token = self.encoder["<|startoftext|>"] + eot_token = self.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + tokens = tokens[:context_length] + result[i, : len(tokens)] = torch.tensor(tokens) + + if len(result) == 1: + return result[0] + return result + + +class IMUPreprocessor(VerboseNNModule): + def __init__( + 
self, + kernel_size: int, + imu_stem: PatchEmbedGeneric, + embed_dim: int, + img_size: Tuple = (6, 2000), + num_cls_tokens: int = 1, + pos_embed_fn: Optional[Callable] = None, + init_param_style: str = "openclip", + ) -> None: + super().__init__() + self.imu_stem = imu_stem + self.embed_dim = embed_dim + self.use_pos_embed = pos_embed_fn is not None + self.num_cls_tokens = num_cls_tokens + self.kernel_size = kernel_size + self.pos_embed = nn.Parameter( + torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) + ) + + if self.num_cls_tokens > 0: + self.cls_token = nn.Parameter( + torch.zeros(1, self.num_cls_tokens, self.embed_dim) + ) + + self.init_parameters(init_param_style) + + @torch.no_grad() + def init_parameters(self, init_param_style): + nn.init.normal_(self.pos_embed, std=0.01) + + if init_param_style == "openclip": + # OpenCLIP style initialization + scale = self.embed_dim**-0.5 + + if self.num_cls_tokens > 0: + nn.init.normal_(self.cls_token) + self.cls_token *= scale + elif init_param_style == "vit": + self.cls_token.data.fill_(0) + else: + raise ValueError(f"Unknown init {init_param_style}") + + def tokenize_input_and_cls_pos(self, input, stem): + # tokens is of shape B x L x D + tokens = stem.norm_layer(stem.proj(input)) + assert tokens.ndim == 3 + assert tokens.shape[2] == self.embed_dim + B = tokens.shape[0] + if self.num_cls_tokens > 0: + class_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole class_tokens impl from Phil Wang, thanks + tokens = torch.cat((class_tokens, tokens), dim=1) + if self.use_pos_embed: + tokens = tokens + self.pos_embed + return tokens + + def forward(self, imu): + # Patchify + imu = imu.unfold( + -1, + self.kernel_size, + self.kernel_size, + ).permute(0, 2, 1, 3) + imu = imu.reshape(imu.size(0), imu.size(1), -1) + + imu_tokens = self.tokenize_input_and_cls_pos( + imu, + self.imu_stem, + ) + + return_dict = { + "trunk": { + "tokens": imu_tokens, + }, + "head": {}, + } + return return_dict diff --git a/model/ImageBind/models/transformer.py b/model/ImageBind/models/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc8216b1448747f9552662edf88d87e17827c5d --- /dev/null +++ b/model/ImageBind/models/transformer.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Code modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; +# https://github.com/facebookresearch/deit/blob/main/models.py +# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py + + +from functools import partial +from typing import Callable, List, Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, trunc_normal_ + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, + # can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class MultiheadAttention(nn.MultiheadAttention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] + + +class ViTAttention(Attention): + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + assert attn_mask is None + return super().forward(x) + + +class BlockWithMasking(nn.Module): + def __init__( + self, + dim: int, + attn_target: Callable, + mlp_ratio: int = 4, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ffn_dropout_rate: float = 0.0, + drop_path: float = 0.0, + layer_scale_type: Optional[str] = None, + layer_scale_init_value: float = 1e-4, + ): + super().__init__() + + assert not isinstance( + attn_target, nn.Module + ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" 
+ self.attn = attn_target() + if drop_path > 0.0: + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm_1 = norm_layer(dim) + mlp_hidden_dim = int(mlp_ratio * dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=ffn_dropout_rate, + ) + self.norm_2 = norm_layer(dim) + self.layer_scale_type = layer_scale_type + if self.layer_scale_type is not None: + assert self.layer_scale_type in [ + "per_channel", + "scalar", + ], f"Found Layer scale type {self.layer_scale_type}" + if self.layer_scale_type == "per_channel": + # one gamma value per channel + gamma_shape = [1, 1, dim] + elif self.layer_scale_type == "scalar": + # single gamma value for all channels + gamma_shape = [1, 1, 1] + # two gammas: for each part of the fwd in the encoder + self.layer_scale_gamma1 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + self.layer_scale_gamma2 = nn.Parameter( + torch.ones(size=gamma_shape) * layer_scale_init_value, + requires_grad=True, + ) + + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + if self.layer_scale_type is None: + x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + x = x + self.drop_path(self.mlp(self.norm_2(x))) + else: + x = ( + x + + self.drop_path(self.attn(self.norm_1(x), attn_mask)) + # * self.layer_scale_gamma1 + ) + x = x + self.drop_path(self.mlp(self.norm_2(x))) # * self.layer_scale_gamma2 + return x + + +_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) + + +class SimpleTransformer(nn.Module): + def __init__( + self, + attn_target: Callable, + embed_dim: int, + num_blocks: int, + block: Callable = BlockWithMasking, + pre_transformer_layer: Optional[Callable] = None, + post_transformer_layer: Optional[Callable] = None, + drop_path_rate: float = 0.0, + drop_path_type: str = "progressive", + norm_layer: Callable = _LAYER_NORM, + mlp_ratio: int = 4, + ffn_dropout_rate: float = 0.0, + layer_scale_type: Optional[str] = None, # from cait; possible values are None, "per_channel", "scalar" + layer_scale_init_value: float = 1e-4, # from cait; float + weight_init_style: str = "jax", # possible values jax or pytorch + ): + """ + Simple Transformer with the following features + 1. Supports masked attention + 2. Supports DropPath + 3. Supports LayerScale + 4. Supports Dropout in Attention and FFN + 5. 
Makes few assumptions about the input except that it is a Tensor + """ + super().__init__() + self.pre_transformer_layer = pre_transformer_layer + if drop_path_type == "progressive": + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] + elif drop_path_type == "uniform": + dpr = [drop_path_rate for i in range(num_blocks)] + else: + raise ValueError(f"Unknown drop_path_type: {drop_path_type}") + + self.blocks = nn.Sequential( + *[ + block( + dim=embed_dim, + attn_target=attn_target, + mlp_ratio=mlp_ratio, + ffn_dropout_rate=ffn_dropout_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + layer_scale_type=layer_scale_type, + layer_scale_init_value=layer_scale_init_value, + ) + for i in range(num_blocks) + ] + ) + self.post_transformer_layer = post_transformer_layer + self.weight_init_style = weight_init_style + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + if self.weight_init_style == "jax": + # Based on MAE and official Jax ViT implementation + torch.nn.init.xavier_uniform_(m.weight) + elif self.weight_init_style == "pytorch": + # PyTorch ViT uses trunc_normal_ + trunc_normal_(m.weight, std=0.02) + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward( + self, + tokens: torch.Tensor, + attn_mask: torch.Tensor = None, + use_checkpoint: bool = False, + checkpoint_every_n: int = 1, + checkpoint_blk_ids: Optional[List[int]] = None, + # return_multi_layer_outputs = False, + out_layers = [] + ): + + """ + Inputs + - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) + - attn: mask of shape L x L + + Output + - x: data of shape N x L x D (or L x N x D depending on the attention implementation) + """ + out_tokens = [] + + if self.pre_transformer_layer: + tokens = self.pre_transformer_layer(tokens) + if use_checkpoint and checkpoint_blk_ids is None: + checkpoint_blk_ids = [ + blk_id + for blk_id in range(len(self.blocks)) + if blk_id % checkpoint_every_n == 0 + ] + if checkpoint_blk_ids: + checkpoint_blk_ids = set(checkpoint_blk_ids) + for blk_id, blk in enumerate(self.blocks): + if use_checkpoint and blk_id in checkpoint_blk_ids: + tokens = checkpoint.checkpoint( + blk, tokens, attn_mask, use_reentrant=False + ) + else: + tokens = blk(tokens, attn_mask=attn_mask) + if blk_id in out_layers: + out_tokens.append(tokens) + if self.post_transformer_layer: + tokens = self.post_transformer_layer(tokens) + return tokens, out_tokens diff --git a/model/ImageBind/requirements.txt b/model/ImageBind/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..572ae079a6cc3592552d93b8ca08c3ec7fd4efc9 --- /dev/null +++ b/model/ImageBind/requirements.txt @@ -0,0 +1,10 @@ +--extra-index-url https://download.pytorch.org/whl/cu113 +torchvision==0.14.0 +torchaudio==0.13.0 +pytorchvideo @ git+https://github.com/facebookresearch/pytorchvideo.git@28fe037d212663c6a24f373b94cc5d478c8c1a1d +timm==0.6.7 +ftfy +regex +einops +fvcore +decord==0.6.0 diff --git a/model/__init__.py b/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad1bf1ef16333db7e465fafcd6d9e1870f77551c --- /dev/null +++ b/model/__init__.py @@ -0,0 +1,11 @@ +from .agent import DeepSpeedAgent +from .openllama import OpenLLAMAPEFTModel +# from .openllama_CLIP import OpenLLAMAPEFTModel_CLIP +from .ImageBind import models + +def load_model(args): + agent_name = 
args['models'][args['model']]['agent_name'] + model_name = args['models'][args['model']]['model_name'] + model = globals()[model_name](**args) + agent = globals()[agent_name](model, args) + return agent diff --git a/model/agent.py b/model/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..a5199b6497f739f5800dbd38daade944459457e8 --- /dev/null +++ b/model/agent.py @@ -0,0 +1,81 @@ +from header import * + +class DeepSpeedAgent: + + def __init__(self, model, args): + super(DeepSpeedAgent, self).__init__() + self.args = args + self.model = model + self.load_stage_1_parameters(args["delta_ckpt_path"]) + + + + for name, param in self.model.named_parameters(): + param.requires_grad = False + + for name, param in self.model.image_decoder.named_parameters(): + param.requires_grad = True + + for name, param in self.model.prompt_learner.named_parameters(): + param.requires_grad = True + + + + + # load config parameters of deepspeed + ds_params = json.load(open(self.args['ds_config_path'])) + ds_params['scheduler']['params']['total_num_steps'] = self.args['total_steps'] + ds_params['scheduler']['params']['warmup_num_steps'] = max(10, int(self.args['total_steps'] * self.args['warmup_rate'])) + self.ds_engine, self.optimizer, _ , _ = deepspeed.initialize( + model=self.model, + model_parameters=self.model.parameters(), + config_params=ds_params, + dist_init_required=True, + args=types.SimpleNamespace(**args) + ) + + @torch.no_grad() + def predict(self, batch): + self.model.eval() + string = self.model.generate_one_sample(batch) + return string + + def train_model(self, batch, current_step=0, pbar=None): + self.ds_engine.module.train() + loss, mle_acc = self.ds_engine(batch) + + self.ds_engine.backward(loss) + self.ds_engine.step() + pbar.set_description(f'[!] loss: {round(loss.item(), 4)}; token_acc: {round(mle_acc*100, 2)}') + pbar.update(1) + if self.args['local_rank'] == 0 and self.args['log_path'] and current_step % self.args['logging_step'] == 0: + elapsed = pbar.format_dict['elapsed'] + rate = pbar.format_dict['rate'] + remaining = (pbar.total - pbar.n) / rate if rate and pbar.total else 0 + remaining = str(datetime.timedelta(seconds=remaining)) + logging.info(f'[!] progress: {round(pbar.n/pbar.total, 5)}; remaining time: {remaining}; loss: {round(loss.item(), 4)}; token_acc: {round(mle_acc*100, 2)}') + + mle_acc *= 100 + return mle_acc + + def save_model(self, path, current_step): + # only save trainable model parameters + param_grad_dic = { + k: v.requires_grad for (k, v) in self.ds_engine.module.named_parameters() + } + state_dict = self.ds_engine.module.state_dict() + checkpoint = OrderedDict() + for k, v in self.ds_engine.module.named_parameters(): + if v.requires_grad: + print(k) + checkpoint[k] = v + torch.save(checkpoint, f'{path}/pytorch_model.pt') + # save tokenizer + self.model.llama_tokenizer.save_pretrained(path) + # save configuration + self.model.llama_model.config.save_pretrained(path) + print(f'[!] 
save model into {path}') + + def load_stage_1_parameters(self, path): + delta_ckpt = torch.load(path, map_location=torch.device('cpu')) + self.model.load_state_dict(delta_ckpt, strict=False) diff --git a/model/modeling_llama.py b/model/modeling_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..12d980e189d902fb1a6d9ea05dc3ca91959b1c8c --- /dev/null +++ b/model/modeling_llama.py @@ -0,0 +1,755 @@ +# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py + +""" PyTorch LLaMA model.""" +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from transformers.models.llama.configuration_llama import LlamaConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +class LlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer("inv_freq", inv_freq) + + # Build here to make `torch.jit.trace` work. + self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
+ if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] + gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) + cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + ): + super().__init__() + self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) + self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.act_fn = ACT2FN[hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + # [bsz, nh, t, hd] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: LlamaConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = LlamaAttention(config=config) + self.mlp = LlamaMLP( + hidden_size=self.hidden_size, + 
intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +LLAMA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`LlamaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaPreTrainedModel(PreTrainedModel): + config_class = LlamaConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["LlamaDecoderLayer"] + _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LlamaModel): + module.gradient_checkpointing = value + + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + query_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = 
output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + if query_embeds is not None: + inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1) + batch_size, seq_length, _ = inputs_embeds.shape + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class LlamaForCausalLM(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.model = LlamaModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + query_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you consciours? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + query_embeds=query_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + query_embeds = None + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "query_embeds": query_embeds, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + 
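# note: used by beam search to reorder the cache; for every layer's cached (key, value)
+ # tensors, the batch entries of the surviving beams are selected via `beam_idx`,
+ # e.g. beam_idx = tensor([2, 0, 1]) permutes each cached tensor's rows to (2, 0, 1).
+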
for layer_past in past_key_values: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past + diff --git a/model/openllama.py b/model/openllama.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe63239591e54817171653d3ad945d0f918372b --- /dev/null +++ b/model/openllama.py @@ -0,0 +1,729 @@ +from header import * +import torch.nn.functional as F +from .ImageBind import * +from .ImageBind import data +from .modeling_llama import LlamaForCausalLM +from .AnomalyGPT_models import LinearLayer, PromptLearner +from transformers import StoppingCriteria, StoppingCriteriaList +from utils.loss import FocalLoss, BinaryDiceLoss +import kornia as K + +import torch +from torch.nn.utils import rnn + +CLASS_NAMES = ['bottle', 'cable', 'capsule', 'carpet', 'grid', 'hazelnut', 'leather', 'metal nut', 'pill', 'screw', 'tile', 'toothbrush', 'transistor', 'wood', 'zipper', 'object', + 'candle', 'cashew', 'chewinggum', 'fryum', 'macaroni', 'pcb', 'pipe fryum'] + +prompt_normal = ['{}', 'flawless {}', 'perfect {}', 'unblemished {}', '{} without flaw', '{} without defect', '{} without damage'] +prompt_abnormal = ['damaged {}', 'broken {}', '{} with flaw', '{} with defect', '{} with damage'] + +prompt_state = [prompt_normal, prompt_abnormal] +prompt_templates = ['a photo of a {}.', 'a photo of the {}.'] +# prompt_templates = [ +# 'a cropped photo of the {}.', 'a cropped photo of a {}.', 'a close-up photo of a {}.', 'a close-up photo of the {}.', +# 'a bright photo of the {}.', 'a bright photo of a {}.', 'a dark photo of a {}.', 'a dark photo of the {}.', +# 'a dark photo of the {}.', 'a dark photo of a {}.', 'a jpeg corrupted photo of a {}.', 'a jpeg corrupted photo of the {}.', +# 'a blurry photo of the {}.', 'a blurry photo of a {}.', 'a photo of a {}.', 'a photo of the {}.', +# 'a photo of the small {}.', 'a photo of a small {}.', 'a photo of the large {}.', 'a photo of a large {}.', +# 'a photo of the {} for visual insprction.', 'a photo of a {} for visual insprction.', +# 'a photo of the {} for anomaly detection.', 'a photo of a {} for anomaly detection.' 
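+# (the larger CLIP-style template ensemble above is commented out; only the two generic
+#  'a photo of a/the {}.' templates defined in prompt_templates are used to build prompt_sentences)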
+# ] +objs = ['bottle', 'cable', 'capsule', 'carpet', 'grid', 'hazelnut', 'leather', 'metal nut', 'pill', 'screw', 'tile', 'toothbrush', 'transistor', 'wood', 'zipper', 'object', + 'candle', 'cashew', 'chewinggum', 'fryum', 'macaroni', 'pcb', 'pipe fryum', 'macaroni1', 'macaroni2','pcb1', 'pcb2', 'pcb3', 'pcb4', 'capsules'] + +prompt_sentences = {} + +for obj in objs: + prompt_sentence_obj = [] + for i in range(len(prompt_state)): + prompted_state = [state.format(obj) for state in prompt_state[i]] + prompted_sentence = [] + for s in prompted_state: + for template in prompt_templates: + prompted_sentence.append(template.format(s)) + prompted_sentence = data.load_and_transform_text(prompted_sentence, torch.device('cpu'))#torch.cuda.current_device()) + prompt_sentence_obj.append(prompted_sentence) + prompt_sentences[obj] = prompt_sentence_obj + + + +def encode_text_with_prompt_ensemble(model, obj, device): + + global prompt_sentences + normal_sentences = [] + abnormal_sentences = [] + for idx in range(len(obj)): + sentence = prompt_sentences[obj[idx].replace('_', ' ')] + normal_sentences.append(sentence[0]) + abnormal_sentences.append(sentence[1]) + + normal_sentences = torch.cat(normal_sentences).to(device) + abnormal_sentences = torch.cat(abnormal_sentences).to(device) + + class_embeddings_normal = model({ModalityType.TEXT: normal_sentences})[ModalityType.TEXT][0] + class_embeddings_abnormal = model({ModalityType.TEXT: abnormal_sentences})[ModalityType.TEXT][0] + # class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) + + class_embeddings_normal = class_embeddings_normal.reshape((len(obj), len(prompt_templates) * len(prompt_normal), 1024)) + class_embeddings_normal = class_embeddings_normal.mean(dim=1, keepdim=True) + class_embeddings_normal = class_embeddings_normal / class_embeddings_normal.norm(dim=-1, keepdim=True) + + class_embeddings_abnormal = class_embeddings_abnormal.reshape((len(obj), len(prompt_templates) * len(prompt_abnormal), 1024)) + class_embeddings_abnormal = class_embeddings_abnormal.mean(dim=1, keepdim=True) + class_embeddings_abnormal = class_embeddings_abnormal / class_embeddings_abnormal.norm(dim=-1, keepdim=True) + + text_features = torch.cat([class_embeddings_normal, class_embeddings_abnormal], dim=1) + + return text_features + + + +class StoppingCriteriaSub(StoppingCriteria): + + def __init__(self, stops = [], encounters=1): + super().__init__() + self.stops = stops + self.ENCOUNTERS = encounters + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): + stop_count = 0 + for stop in self.stops: + stop_count = (stop == input_ids[0]).sum().item() + if stop_count >= self.ENCOUNTERS: + return True + return False + +def build_one_instance(tokenizer, conversation): + text_list = [] + turn_num = len(conversation) + input_ids, target_ids = [], [] + for i in range(turn_num): + turn = conversation[i] + role = turn['from'] + if i == 0: # the first human turn + assert role == 'human' + text = turn['value'] + '\n### Assistant:' + one_input_id = tokenizer(text, add_special_tokens=False).input_ids + input_ids += one_input_id + target_ids += [-100]*len(one_input_id) # do not perform loss regression on human prompt + else: + if role == 'human': + text = 'Human: ' + turn['value'] + '\n### Assistant:' + one_input_id = tokenizer(text, add_special_tokens=False).input_ids + input_ids += one_input_id + target_ids += [-100]*len(one_input_id) + elif role == 'gpt': + text = turn['value'] + '\n###' + one_input_id = tokenizer(text, 
add_special_tokens=False).input_ids + input_ids += one_input_id + target_ids += one_input_id + else: + raise Exception('Wrong Role!!!') + text_list.append(text) + assert len(input_ids) == len(target_ids) + return text_list, input_ids, target_ids + +def process_batch_instance(tokenizer, batch_of_conversations, max_tgt_len): + batch_input_ids, batch_target_ids = [], [] + for conversation in batch_of_conversations: + _, one_input_ids, one_target_ids = build_one_instance(tokenizer, conversation) + batch_input_ids.append(torch.LongTensor(one_input_ids)) + batch_target_ids.append(torch.LongTensor(one_target_ids)) + input_ids = rnn.pad_sequence(batch_input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) + target_ids = rnn.pad_sequence(batch_target_ids, batch_first=True, padding_value=-100) + assert input_ids.size() == target_ids.size() + input_ids = input_ids[:,:max_tgt_len] + target_ids = target_ids[:,:max_tgt_len] + attention_mask = input_ids.ne(tokenizer.pad_token_id) + assert attention_mask.size() == input_ids.size() + return input_ids, target_ids, attention_mask.long() + +def find_first_file_in_directory(directory_path): + try: + file_list = os.listdir(directory_path) + for item in file_list: + item_path = os.path.join(directory_path, item) + if os.path.isfile(item_path): + return item_path + return None + + except OSError as e: + print(f"Error while accessing directory: {e}") + return None + + +PROMPT_START = '### Human: ' +class OpenLLAMAPEFTModel(nn.Module): + + '''LoRA for LLaMa model''' + + def __init__(self, **args): + super(OpenLLAMAPEFTModel, self).__init__() + self.args = args + imagebind_ckpt_path = args['imagebind_ckpt_path'] + vicuna_ckpt_path = args['vicuna_ckpt_path'] + max_tgt_len = args['max_tgt_len'] + stage = args['stage'] + + print (f'Initializing visual encoder from {imagebind_ckpt_path} ...') + + self.visual_encoder, self.visual_hidden_size = imagebind_model.imagebind_huge(args) + imagebind_ckpt = torch.load(imagebind_ckpt_path, map_location=torch.device('cpu')) + self.visual_encoder.load_state_dict(imagebind_ckpt, strict=True) + + self.iter = 0 + + self.image_decoder = LinearLayer(1280, 1024, 4) + + self.prompt_learner = PromptLearner(1, 4096) + + self.loss_focal = FocalLoss() + self.loss_dice = BinaryDiceLoss() + + + # free vision encoder + for name, param in self.visual_encoder.named_parameters(): + param.requires_grad = False + self.visual_encoder.eval() + print ('Visual encoder initialized.') + + print (f'Initializing language decoder from {vicuna_ckpt_path} ...') + + # add the lora module + peft_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + inference_mode=False, + r=self.args['lora_r'], + lora_alpha=self.args['lora_alpha'], + lora_dropout=self.args['lora_dropout'], + target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj'] + ) + + self.llama_model = LlamaForCausalLM.from_pretrained(vicuna_ckpt_path) + self.llama_model = get_peft_model(self.llama_model, peft_config) + self.llama_model.print_trainable_parameters() + + self.llama_tokenizer = LlamaTokenizer.from_pretrained(vicuna_ckpt_path, use_fast=False) + self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token + self.llama_tokenizer.padding_side = "right" + print ('Language decoder initialized.') + + self.llama_proj = nn.Linear( + self.visual_hidden_size, self.llama_model.config.hidden_size + ) + + self.max_tgt_len = max_tgt_len + self.device = torch.device('cpu')#torch.cuda.current_device() + + + def rot90_img(self,x,k): + # k is 0,1,2,3 + degreesarr = [0., 90., 180., 270., 360] + 
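# note: k selects the rotation angle in degrees (0/90/180/270); the trailing 360 entry is
+ # never reached for the k values (0..3) passed in by encode_image_for_one_shot_with_aug.
+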
degrees = torch.tensor(degreesarr[k]).to(self.llama_model.dtype).to(self.device) + x = K.geometry.transform.rotate(x, angle = degrees, padding_mode='reflection') + return x + + def encode_video(self, video_paths): + inputs = {ModalityType.VISION: data.load_and_transform_video_data(video_paths, self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + video_embeds = embeddings[ModalityType.VISION][0] # bsz x 1024 + inputs_llama = self.llama_proj(video_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama + + def encode_audio(self, audio_paths): + inputs = {ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + audio_embeds = embeddings[ModalityType.AUDIO][0] # bsz x 1024 + inputs_llama = self.llama_proj(audio_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama + + def encode_thermal(self, thermal_paths): + inputs = {ModalityType.THERMAL: data.load_and_transform_thermal_data(thermal_paths, self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + image_embeds = embeddings['thermal'][0] # bsz x 1024 + inputs_llama = self.llama_proj(image_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama + + def encode_image(self, image_paths): + inputs = {ModalityType.VISION: data.load_and_transform_vision_data(image_paths, self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + image_embeds = embeddings['vision'][0] # bsz x 1024 + patch_features = embeddings['vision'][1] # bsz x h*w x 1280 + patch_tokens = self.image_decoder(patch_features) # bsz x h*w x 1024 + + inputs_llama = self.llama_proj(image_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama, patch_tokens + + def encode_image_for_web_demo(self, image_paths): + inputs = {ModalityType.VISION: data.load_and_transform_vision_data_for_web_demo(image_paths, self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + image_embeds = embeddings['vision'][0] # bsz x 1024 + patch_features = embeddings['vision'][1] # bsz x h*w x 1280 + patch_tokens = self.image_decoder(patch_features) # bsz x h*w x 1024 + + inputs_llama = self.llama_proj(image_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama, patch_tokens + + def encode_image_for_one_shot(self, image_paths): + inputs = {ModalityType.VISION: data.load_and_transform_vision_data(image_paths, self.device)} 
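+ # note: unlike encode_image, this one-shot path returns only per-layer patch features
+ # (with the leading CLS token dropped); callers compare them against a normal reference
+ # image via cosine similarity instead of projecting them into the LLaMA embedding space.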
+ # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + patch_features = embeddings['vision'][1] # bsz x h*w x 1280 + for i in range(len(patch_features)): + patch_features[i] = patch_features[i].transpose(0, 1)[:, 1:, :] + + return patch_features + + def encode_image_for_one_shot_from_tensor(self, image_tensors): + if not isinstance(image_tensors, list): + image_tensors = [image_tensors] + inputs = {ModalityType.VISION: torch.stack(image_tensors, dim=0).to(self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + patch_features = embeddings['vision'][1] # bsz x h*w x 1280 + for i in range(len(patch_features)): + patch_features[i] = patch_features[i].transpose(0, 1)[:, 1:, :] + + return patch_features + + def encode_image_for_one_shot_with_aug(self, image_paths): + image_tensors = data.load_and_transform_vision_data(image_paths, self.device).to(self.llama_model.dtype) + B,C,H,W = image_tensors.shape + # print(B,C,H,W) + + rotated_images = torch.zeros((4, B, C, H, W)).to(self.llama_model.dtype).to(self.device) + + + for j, degree in enumerate([0, 1, 2, 3]): + rotated_img = self.rot90_img(image_tensors, degree) + # 存储旋转后的图像 + rotated_images[j] = rotated_img + + image_tensors = rotated_images.transpose(0,1).reshape(B * 4, C, H, W) + + inputs = {ModalityType.VISION: image_tensors} + # convert into visual dtype + inputs = {key: inputs[key] for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + patch_features = embeddings['vision'][1] # bsz x h*w x 1280 + for i in range(len(patch_features)): + patch_features[i] = patch_features[i].transpose(0, 1)[:, 1:, :].reshape(B,4,256,1280).reshape(B, 4 * 256, 1280) + + return patch_features + + def encode_image_from_tensor(self, image_tensors): + if not isinstance(image_tensors, list): + image_tensors = [image_tensors] + inputs = {ModalityType.VISION: torch.stack(image_tensors, dim=0).to(self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + image_embeds = embeddings['vision'][0] # bsz x 1024 + patch_features = embeddings['vision'][1] # bsz x h*w x 1024 + patch_tokens = self.image_decoder(patch_features) + + + inputs_llama = self.llama_proj(image_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama, patch_tokens + + def encode_image_from_tensor_no_patch(self, image_tensors): + if not isinstance(image_tensors, list): + image_tensors = [image_tensors] + inputs = {ModalityType.VISION: torch.stack(image_tensors, dim=0).to(self.device)} + # convert into visual dtype + inputs = {key: inputs[key].to(self.llama_model.dtype) for key in inputs} + with torch.no_grad(): + embeddings = self.visual_encoder(inputs) + image_embeds = embeddings['vision'][0] # bsz x 1024 + + inputs_llama = self.llama_proj(image_embeds).unsqueeze(1) # bsz x 1 x llama_size + atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(self.device) # bsz x 1 + return inputs_llama, atts_llama + + + + def prompt_wrap(self, img_embeds, input_ids, target_ids, attention_mask, anomaly_embedding = None): + ''' + input_ids, target_ids, attention_mask: bsz x s2 + ''' + 
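# note: the sequence handed to LLaMA is laid out as
+ # [bos][### Human: ][image embedding][' '][anomaly prompt embeddings, if given][conversation tokens],
+ # and the targets are filled with -100 over that prefix so the LM loss only covers the conversation ids.
+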
input_ids = input_ids.to(self.device) # bsz x s2 + target_ids = target_ids.to(self.device) # bsz x s2 + attention_mask = attention_mask.to(self.device) # bsz x s2 + + batch_size = img_embeds.shape[0] + p_before = PROMPT_START + p_before_tokens = self.llama_tokenizer(p_before, + return_tensors="pt", add_special_tokens=False).to(self.device) + # peft model need deeper call + p_before_embeds = self.llama_model.model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1) # bsz x s1 x embed_dim + + p_middle = ' ' + p_middle_tokens = self.llama_tokenizer(p_middle, + return_tensors="pt", add_special_tokens=False).to(self.device) + # peft model need deeper call + p_middle_embeds = self.llama_model.model.model.embed_tokens(p_middle_tokens.input_ids).expand(batch_size, -1, -1) # bsz x s1 x embed_dim + + + p_after_embeds = self.llama_model.model.model.embed_tokens(input_ids).expand(batch_size, -1, -1) # bsz x s2 x embed_dim + bos = torch.ones([batch_size, 1], + dtype=p_before_tokens.input_ids.dtype, + device=p_before_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id # bsz x 1 + bos_embeds = self.llama_model.model.model.embed_tokens(bos) # bsz x 1 x embed_dim + + + + if anomaly_embedding != None: + inputs_embeds = torch.cat([bos_embeds, p_before_embeds, img_embeds, p_middle_embeds, anomaly_embedding, p_after_embeds], dim=1) # bsz x (1+s1+1+s2) x embed_dim + # create targets + empty_targets = ( + torch.ones([batch_size, 1+p_before_embeds.size()[1]+1+p_middle_embeds.size()[1] + anomaly_embedding.size()[1]], # 1 (bos) + s1 + 1 (image vector) + dtype=torch.long).to(self.device).fill_(-100) + ) # bsz x (1 + s1 + 1) + targets = torch.cat([empty_targets, target_ids], dim=1) # bsz x (1 + s1 + 1 + s2) + assert inputs_embeds.size()[1] == targets.size()[1] + + atts_prefix = torch.ones([batch_size, 1+p_before_embeds.size()[1]+1+p_middle_embeds.size()[1] + anomaly_embedding.size()[1]], dtype=torch.long).to(self.device) # bsz x (1 + s1 +1) + attention_mask = torch.cat([atts_prefix, attention_mask], dim=1) + assert attention_mask.size() == targets.size() # bsz x (1 + s1 + 1 + s2) + return inputs_embeds, targets, attention_mask + else: + inputs_embeds = torch.cat([bos_embeds, p_before_embeds, img_embeds, p_middle_embeds, p_after_embeds], dim=1) # bsz x (1+s1+1+s2) x embed_dim + # create targets + empty_targets = ( + torch.ones([batch_size, 1+p_before_embeds.size()[1]+1+p_middle_embeds.size()[1]], # 1 (bos) + s1 + 1 (image vector) + dtype=torch.long).to(self.device).fill_(-100) + ) # bsz x (1 + s1 + 1) + targets = torch.cat([empty_targets, target_ids], dim=1) # bsz x (1 + s1 + 1 + s2) + assert inputs_embeds.size()[1] == targets.size()[1] + + atts_prefix = torch.ones([batch_size, 1+p_before_embeds.size()[1]+1+p_middle_embeds.size()[1]], dtype=torch.long).to(self.device) # bsz x (1 + s1 +1) + attention_mask = torch.cat([atts_prefix, attention_mask], dim=1) + assert attention_mask.size() == targets.size() # bsz x (1 + s1 + 1 + s2) + return inputs_embeds, targets, attention_mask + + + def forward(self, inputs): + + if 'masks' in inputs: + + image_paths = inputs['images'] + img_embeds, _, patch_tokens = self.encode_image_from_tensor(image_paths) + class_name = inputs['class_names'] + + loss_pixel = 0 + feats_text_tensor = encode_text_with_prompt_ensemble(self.visual_encoder, ['object' for _ in class_name], self.device) + + anomaly_maps = [] + for layer in range(len(patch_tokens)): + patch_tokens[layer] = patch_tokens[layer] / patch_tokens[layer].norm(dim=-1, keepdim=True) + # 
print(patch_tokens[layer].shape) + # anomaly_map = torch.bmm(patch_tokens[layer], feats_text_tensor.transpose(-2,-1)) + anomaly_map = (100.0 * patch_tokens[layer] @ feats_text_tensor.transpose(-2,-1)) + B, L, C = anomaly_map.shape + H = int(np.sqrt(L)) + anomaly_map = F.interpolate(anomaly_map.permute(0, 2, 1).view(B, 2, H, H), + size=224, mode='bilinear', align_corners=True) + # anomaly_map_no_softmax = anomaly_map + anomaly_map = torch.softmax(anomaly_map, dim=1) + anomaly_maps.append(anomaly_map) + # anomaly_maps_ns.append(anomaly_map_no_softmax) + + gt = inputs['masks'] + gt = torch.stack(gt, dim=0).to(self.device) + gt = gt.squeeze() + # print(gt.max(), gt.min()) + gt[gt > 0.3], gt[gt <= 0.3] = 1, 0 + + + for num in range(len(anomaly_maps)): + f_loss = self.loss_focal(anomaly_maps[num], gt) + d_loss = self.loss_dice(anomaly_maps[num][:, 1, :, :], gt) + loss_pixel = loss_pixel + f_loss + d_loss + + for num in range(len(anomaly_maps)): + anomaly_maps[num] = anomaly_maps[num][:,1,:,:] + + anomaly_map_all = torch.mean(torch.stack(anomaly_maps, dim=0), dim=0).unsqueeze(1) + + if random.randint(0,1) == 0 and len(inputs['img_paths']) == len(image_paths): + + normal_paths = [] + for path in inputs['img_paths']: + normal_path = path.replace('test', 'train') + normal_path = find_first_file_in_directory("/".join(normal_path.split('/')[:-2])+'/good') + normal_paths.append(normal_path) + + print(normal_paths) + query_patch_tokens = self.encode_image_for_one_shot_from_tensor(image_paths) + normal_patch_tokens = self.encode_image_for_one_shot_with_aug(normal_paths) + sims = [] + B = len(image_paths) + + for i in range(len(query_patch_tokens)): + query_patch_tokens_reshaped = query_patch_tokens[i].view(B,256,1,1280) + normal_tokens_reshaped = normal_patch_tokens[i].reshape(B,1,-1,1280) + cosine_similarity_matrix = F.cosine_similarity(query_patch_tokens_reshaped, normal_tokens_reshaped, dim=-1) + sim_max, _ = torch.max(cosine_similarity_matrix, dim=-1) + sims.append(sim_max) + + sim = torch.mean(torch.stack(sims,dim=0), dim=0).reshape(B,1,16,16) + sim = F.interpolate(sim,size=224, mode='bilinear', align_corners=True) + anomaly_map_all = 1 - sim # (anomaly_map_all + 1 - sim) / 2 + + anomaly_map_prompts = self.prompt_learner(anomaly_map_all) + + # img_embeds = img_embeds + anomaly_map_prompts + + output_texts = inputs['texts'] + input_ids, target_ids, attention_mask = process_batch_instance(self.llama_tokenizer, output_texts, self.max_tgt_len) + inputs_embeds, targets, attention_mask = self.prompt_wrap(img_embeds, input_ids, target_ids, attention_mask, anomaly_map_prompts) + + outputs = self.llama_model( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + labels=targets, + ) + loss = outputs.loss + + # loss_l2 = torch.norm(anomaly_map_prompts / 2 , p=2) + # loss_l2 = nn.MSELoss()(img_embeds_origin, img_embeds) + # calculate the token accuarcy + chosen_tokens = torch.max(outputs.logits, dim=-1)[1][:, 1:-1] # [B, S-1] + # print(self.llama_tokenizer.decode(chosen_tokens[0], skip_special_tokens=True)) + labels = targets[:, 2:] + gen_acc = (chosen_tokens.reshape(-1) == labels.reshape(-1)).to(torch.long) # [B*S] + valid_mask = (labels != -100).reshape(-1) + # print(self.llama_tokenizer.decode(chosen_tokens.reshape(-1)[valid_mask], skip_special_tokens=True)) + valid_tokens = gen_acc & valid_mask # [B*S] + gen_acc = valid_tokens.sum().item() / valid_mask.sum().item() + + return loss + loss_pixel, gen_acc + + else: + + image_paths = inputs['image_paths'] + img_embeds, _, 
patch_tokens = self.encode_image_from_tensor(image_paths) + + output_texts = inputs['output_texts'] + + c_name = 'object' + for name in CLASS_NAMES: + if name in output_texts: + c_name = name + break + + feats_text_tensor = encode_text_with_prompt_ensemble(self.visual_encoder, ['object'] * len(image_paths), self.device) + + anomaly_maps = [] + for layer in range(len(patch_tokens)): + patch_tokens[layer] = patch_tokens[layer] / patch_tokens[layer].norm(dim=-1, keepdim=True) + # print(patch_tokens[layer].shape) + # anomaly_map = torch.bmm(patch_tokens[layer], feats_text_tensor.transpose(-2,-1)) + anomaly_map = (100.0 * patch_tokens[layer] @ feats_text_tensor.transpose(-2,-1)) + B, L, C = anomaly_map.shape + H = int(np.sqrt(L)) + anomaly_map = F.interpolate(anomaly_map.permute(0, 2, 1).view(B, 2, H, H), + size=224, mode='bilinear', align_corners=True) + # anomaly_map_no_softmax = anomaly_map + anomaly_map = torch.softmax(anomaly_map, dim=1) + anomaly_maps.append(anomaly_map) + + for num in range(len(anomaly_maps)): + anomaly_maps[num] = anomaly_maps[num][:,1,:,:] + + anomaly_map_all = torch.mean(torch.stack(anomaly_maps, dim=0), dim=0).unsqueeze(1) + + anomaly_map_prompts = self.prompt_learner(anomaly_map_all) + + # img_embeds = img_embeds + anomaly_map_prompts + + input_ids, target_ids, attention_mask = process_batch_instance(self.llama_tokenizer, output_texts, self.max_tgt_len) + inputs_embeds, targets, attention_mask = self.prompt_wrap(img_embeds, input_ids, target_ids, attention_mask, anomaly_map_prompts) + + outputs = self.llama_model( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + labels=targets, + ) + loss = outputs.loss + # calculate the token accuarcy + chosen_tokens = torch.max(outputs.logits, dim=-1)[1][:, 1:-1] # [B, S-1] + labels = targets[:, 2:] + gen_acc = (chosen_tokens.reshape(-1) == labels.reshape(-1)).to(torch.long) # [B*S] + valid_mask = (labels != -100).reshape(-1) + valid_tokens = gen_acc & valid_mask # [B*S] + gen_acc = valid_tokens.sum().item() / valid_mask.sum().item() + + return loss, gen_acc + + + def extract_multimodal_feature(self, inputs, web_demo): + features = [] + if inputs['image_paths']: + + prompt = inputs['prompt'] + c_name = 'object' + for name in CLASS_NAMES: + if name in prompt: + c_name = name + break + + if not web_demo: + image_embeds, _, patch_tokens = self.encode_image(inputs['image_paths']) + feats_text_tensor = encode_text_with_prompt_ensemble(self.visual_encoder, [c_name], self.device) + else: + image_embeds, _, patch_tokens = self.encode_image_for_web_demo(inputs['image_paths']) + feats_text_tensor = encode_text_with_prompt_ensemble(self.visual_encoder, ['object'], self.device) + + anomaly_maps = [] + for layer in range(len(patch_tokens)): + patch_tokens[layer] = patch_tokens[layer] / patch_tokens[layer].norm(dim=-1, keepdim=True) + # print(patch_tokens[layer].shape) + # anomaly_map = torch.bmm(patch_tokens[layer], feats_text_tensor.transpose(-2,-1)) + anomaly_map = (100.0 * patch_tokens[layer] @ feats_text_tensor.transpose(-2,-1)) + B, L, C = anomaly_map.shape + H = int(np.sqrt(L)) + anomaly_map = F.interpolate(anomaly_map.permute(0, 2, 1).view(B, 2, H, H), + size=224, mode='bilinear', align_corners=True) + anomaly_map = torch.softmax(anomaly_map, dim=1) + anomaly_maps.append(anomaly_map[:,1,:,:]) + + anomaly_map_ret = torch.mean(torch.stack(anomaly_maps, dim=0), dim=0).unsqueeze(1) + # anomaly_map_all = anomaly_map_ret.unsqueeze(1).repeat((1,3,1,1)) + # anomaly_map_feature, _, _ = 
self.encode_image_from_tensor(anomaly_map_all) + # image_embeds = anomaly_map_feature + image_embeds + if inputs['normal_img_paths']: + query_patch_tokens = self.encode_image_for_one_shot(inputs['image_paths']) + if 'mvtec' in 'normal_img_paths': + normal_patch_tokens = self.encode_image_for_one_shot_with_aug(inputs['normal_img_paths']) + else: + normal_patch_tokens = self.encode_image_for_one_shot(inputs['normal_img_paths']) + sims = [] + + for i in range(len(query_patch_tokens)): + query_patch_tokens_reshaped = query_patch_tokens[i].view(256,1,1280) + normal_tokens_reshaped = normal_patch_tokens[i].reshape(1,-1,1280) + cosine_similarity_matrix = F.cosine_similarity(query_patch_tokens_reshaped, normal_tokens_reshaped, dim=2) + sim_max, _ = torch.max(cosine_similarity_matrix, dim=1) + sims.append(sim_max) + + sim = torch.mean(torch.stack(sims,dim=0), dim=0).reshape(1,1,16,16) + sim = F.interpolate(sim,size=224, mode='bilinear', align_corners=True) + anomaly_map_ret = 1 - sim # (anomaly_map_ret + 1 - sim) / 2 + + + features.append(image_embeds) + if inputs['audio_paths']: + audio_embeds, _ = self.encode_audio(inputs['audio_paths']) + features.append(audio_embeds) + if inputs['video_paths']: + video_embeds, _ = self.encode_video(inputs['video_paths']) + features.append(video_embeds) + if inputs['thermal_paths']: + thermal_embeds, _ = self.encode_thermal(inputs['thermal_paths']) + features.append(thermal_embeds) + + feature_embeds = torch.cat(features).sum(dim=0).unsqueeze(0) + return feature_embeds, anomaly_map_ret + + def prepare_generation_embedding(self, inputs, web_demo): + prompt = inputs['prompt'] + # if len(inputs['modality_embeds']) == 1: + # feature_embeds = inputs['modality_embeds'][0] + # else: + feature_embeds, anomaly_map = self.extract_multimodal_feature(inputs, web_demo) + # print(anomaly_map.shape) + inputs['modality_embeds'].append(feature_embeds) + + batch_size = feature_embeds.shape[0] + p_before = PROMPT_START + p_before_tokens = self.llama_tokenizer(p_before, + return_tensors="pt", add_special_tokens=False).to(self.device) + p_before_embeds = self.llama_model.model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1) # bsz x s1 x embed_dim + + p_middle = ' ' + p_middle_tokens = self.llama_tokenizer(p_middle, + return_tensors="pt", add_special_tokens=False).to(self.device) + # peft model need deeper call + p_middle_embeds = self.llama_model.model.model.embed_tokens(p_middle_tokens.input_ids).expand(batch_size, -1, -1) # bsz x s1 x embed_dim + + # self.prompt_learner.eval() + anomaly_map_prompts = self.prompt_learner(anomaly_map) + + + + + text = prompt + '\n### Assistant:' + p_after_tokens = self.llama_tokenizer(text, add_special_tokens=False, return_tensors='pt').to(self.device) + p_after_embeds = self.llama_model.model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1) # bsz x s2 x embed_dim + bos = torch.ones([batch_size, 1], + dtype=p_before_tokens.input_ids.dtype, + device=p_before_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id # bsz x 1 + bos_embeds = self.llama_model.model.model.embed_tokens(bos) # bsz x 1 x embed_dim + inputs_embeds = torch.cat([bos_embeds, p_before_embeds, feature_embeds, p_middle_embeds, anomaly_map_prompts, p_after_embeds], dim=1) # bsz x (1+s1+1+s2) x embed_dim + + return inputs_embeds, anomaly_map + + def generate(self, inputs, web_demo=False): + ''' + inputs = { + 'image_paths': optional, + 'audio_paths': optional + 'video_paths': optional + 'thermal_paths': optional + 'mode': generation 
mode, + 'prompt': human input prompt, + 'max_tgt_len': generation length, + 'top_p': top_p, + 'temperature': temperature + 'modality_embeds': None or torch.tensor + 'modality_cache': save the image cache + } + ''' + # self.prompt_learner.eval() + # self.llama_model.eval() + # self.llama_proj.eval() + # self.image_decoder.eval() + # self.llama_tokenizer.eval() + input_embeds, pixel_output = self.prepare_generation_embedding(inputs, web_demo) + stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=[2277], encounters=1)]) + outputs = self.llama_model.generate( + inputs_embeds=input_embeds, + max_new_tokens=inputs['max_tgt_len'], + top_p=inputs['top_p'], + temperature=inputs['temperature'], + do_sample=True, + use_cache=True, + stopping_criteria=stopping_criteria, + ) + output_text = self.llama_tokenizer.decode(outputs[0][:-2], skip_special_tokens=True) + return output_text, pixel_output \ No newline at end of file diff --git a/pretrained_ckpt/imagebind_ckpt/imagebind_huge.pth b/pretrained_ckpt/imagebind_ckpt/imagebind_huge.pth new file mode 100644 index 0000000000000000000000000000000000000000..21709a82dbb35fca042d46877b89074bc1885401 --- /dev/null +++ b/pretrained_ckpt/imagebind_ckpt/imagebind_huge.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f6c22bedcc90708448d5d2fbb7b2db9c73f505dc89bd0b2e09b23af1b62157 +size 4803584173 diff --git a/pretrained_ckpt/pandagpt_ckpt/13b/empty.txt b/pretrained_ckpt/pandagpt_ckpt/13b/empty.txt new file mode 100644 index 0000000000000000000000000000000000000000..216a6c6abc8e48838ed2e629908f322ee778ccd1 --- /dev/null +++ b/pretrained_ckpt/pandagpt_ckpt/13b/empty.txt @@ -0,0 +1 @@ +empty placeholder \ No newline at end of file diff --git a/pretrained_ckpt/pandagpt_ckpt/7b/empty.txt b/pretrained_ckpt/pandagpt_ckpt/7b/empty.txt new file mode 100644 index 0000000000000000000000000000000000000000..216a6c6abc8e48838ed2e629908f322ee778ccd1 --- /dev/null +++ b/pretrained_ckpt/pandagpt_ckpt/7b/empty.txt @@ -0,0 +1 @@ +empty placeholder \ No newline at end of file diff --git a/pretrained_ckpt/pandagpt_ckpt/7b/pytorch_model.pt b/pretrained_ckpt/pandagpt_ckpt/7b/pytorch_model.pt new file mode 100644 index 0000000000000000000000000000000000000000..0f970dd0346ea64eddefabd8f8975d2c6fd59576 --- /dev/null +++ b/pretrained_ckpt/pandagpt_ckpt/7b/pytorch_model.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0d56d6fc00051c10c554f4d2aa9f8410e24ee6b754ae94dc8e2373d49571b94 +size 75557949 diff --git a/pretrained_ckpt/vicuna_ckpt/13b_v0/empty.txt b/pretrained_ckpt/vicuna_ckpt/13b_v0/empty.txt new file mode 100644 index 0000000000000000000000000000000000000000..216a6c6abc8e48838ed2e629908f322ee778ccd1 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/13b_v0/empty.txt @@ -0,0 +1 @@ +empty placeholder \ No newline at end of file diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/config.json b/pretrained_ckpt/vicuna_ckpt/7b_v0/config.json new file mode 100644 index 0000000000000000000000000000000000000000..536beaa7110050c8f9109d41ed3b18643fd4220e --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/config.json @@ -0,0 +1,23 @@ +{ + "_name_or_path": "llama-7b-hf-transformers-4.29/", + "architectures": [ + "LlamaForCausalLM" + ], + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 11008, + "max_position_embeddings": 2048, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "pad_token_id": 0, + "rms_norm_eps": 
1e-06, + "tie_word_embeddings": false, + "torch_dtype": "float16", + "transformers_version": "4.29.1", + "use_cache": true, + "vocab_size": 32001 +} diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/generation_config.json b/pretrained_ckpt/vicuna_ckpt/7b_v0/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..684bc56cb1fb502fe6bfecbc2bb6713f2db918d7 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 1, + "eos_token_id": 2, + "pad_token_id": 0, + "transformers_version": "4.29.1" +} diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00001-of-00002.bin b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00001-of-00002.bin new file mode 100644 index 0000000000000000000000000000000000000000..7260e497ec8b9bb7bd27c1cfe8ded8d75c1cdb28 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00001-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef1b2c502e2eab32176400bd8af8636163619fb04e65c0c0fdea58f1cbe21807 +size 9976642750 diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00002-of-00002.bin b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00002-of-00002.bin new file mode 100644 index 0000000000000000000000000000000000000000..5efbbfd673521baf733d9cc2a6c018f127552e27 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model-00002-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f3789f864bf21ca0733d782022e3656759728151fab435e6799696124099a9a +size 3500323731 diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model.bin.index.json b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..c881ac33f7dd7c892c8f29e900b7b675867fb062 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/pytorch_model.bin.index.json @@ -0,0 +1,330 @@ +{ + "metadata": { + "total_size": 13476855808 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00002-of-00002.bin", + "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + 
"model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.input_layernorm.weight": 
"pytorch_model-00001-of-00002.bin", + "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + 
"model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.21.self_attn.q_proj.weight": 
"pytorch_model-00001-of-00002.bin", + "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + 
"model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.post_attention_layernorm.weight": 
"pytorch_model-00001-of-00002.bin", + "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin", + "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", + "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.5.self_attn.rotary_emb.inv_freq": 
"pytorch_model-00001-of-00002.bin", + "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin", + "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", + "model.norm.weight": "pytorch_model-00002-of-00002.bin" + } +} diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/special_tokens_map.json b/pretrained_ckpt/vicuna_ckpt/7b_v0/special_tokens_map.json new file mode 100644 index 
0000000000000000000000000000000000000000..d85ba6cb6820b01226ef8bd40b46bb489041c6a8 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/special_tokens_map.json @@ -0,0 +1,23 @@ +{ + "bos_token": { + "content": "<s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "</s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "<unk>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer.model b/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899 --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer_config.json b/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..400e3de6ffc3884ec3c158a046f6a04da00ef3ca --- /dev/null +++ b/pretrained_ckpt/vicuna_ckpt/7b_v0/tokenizer_config.json @@ -0,0 +1,33 @@ +{ + "add_bos_token": true, + "add_eos_token": false, + "bos_token": { + "__type": "AddedToken", + "content": "<s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "clean_up_tokenization_spaces": false, + "eos_token": { + "__type": "AddedToken", + "content": "</s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "model_max_length": 1000000000000000019884624838656, + "pad_token": null, + "sp_model_kwargs": {}, + "tokenizer_class": "LlamaTokenizer", + "unk_token": { + "__type": "AddedToken", + "content": "<unk>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba3eecae93d35af4312c64717ec488c3321420c2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,25 @@ +deepspeed==0.9.2 +easydict==1.10 +einops==0.6.1 +ftfy==6.1.1 +gradio==3.41.2 +h5py==3.9.0 +iopath==0.1.10 +ipdb==0.13.13 +kornia==0.7.0 +matplotlib==3.7.2 +mdtex2html==1.2.0 +numpy==1.24.3 +open3d_python==0.3.0.0 +opencv_python==4.8.0.74 +peft==0.3.0 +Pillow==10.0.0 +pytorchvideo==0.1.5 +PyYAML==6.0.1 +regex==2022.10.31 +timm==0.6.7 +torch==1.13.1 +torchaudio==0.13.1 +torchvision==0.14.1 +tqdm==4.64.1 +transformers==4.29.1 diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf3dc3d0427d2e462884eb949d849600ad23f6f9 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/__pycache__/loss.cpython-38.pyc b/utils/__pycache__/loss.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1685c1322d0f037300c84afcd87ad406961206e8 Binary files /dev/null and b/utils/__pycache__/loss.cpython-38.pyc differ diff --git a/utils/build.py b/utils/build.py new file mode 100644 index 0000000000000000000000000000000000000000..9e240d74f7c1fd02b0fbbf4904819f52a9deccd3 --- /dev/null +++
b/utils/build.py @@ -0,0 +1,17 @@ +from ..utils import registry + + +DATASETS = registry.Registry('dataset') + + +def build_dataset_from_cfg(cfg, default_args = None): + """ + Build a dataset, defined by `dataset_name`. + Args: + cfg (eDICT): + Returns: + Dataset: a constructed dataset specified by dataset_name. + """ + return DATASETS.build(cfg, default_args = default_args) + + diff --git a/utils/config.py b/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..b364ee774f8437a9962280f28748e2167a45e732 --- /dev/null +++ b/utils/config.py @@ -0,0 +1,63 @@ +import yaml +from easydict import EasyDict +import os +from .logger import print_log + +def log_args_to_file(args, pre='args', logger=None): + for key, val in args.__dict__.items(): + print_log(f'{pre}.{key} : {val}', logger = logger) + +def log_config_to_file(cfg, pre='cfg', logger=None): + for key, val in cfg.items(): + if isinstance(cfg[key], EasyDict): + print_log(f'{pre}.{key} = edict()', logger = logger) + log_config_to_file(cfg[key], pre=pre + '.' + key, logger=logger) + continue + print_log(f'{pre}.{key} : {val}', logger = logger) + +def merge_new_config(config, new_config): + for key, val in new_config.items(): + if not isinstance(val, dict): + if key == '_base_': + with open(new_config['_base_'], 'r') as f: + try: + val = yaml.load(f, Loader=yaml.FullLoader) + except: + val = yaml.load(f) + config[key] = EasyDict() + merge_new_config(config[key], val) + else: + config[key] = val + continue + if key not in config: + config[key] = EasyDict() + merge_new_config(config[key], val) + return config + +def cfg_from_yaml_file(cfg_file): + config = EasyDict() + with open(cfg_file, 'r') as f: + try: + new_config = yaml.load(f, Loader=yaml.FullLoader) + except: + new_config = yaml.load(f) + merge_new_config(config=config, new_config=new_config) + return config + +def get_config(args, logger=None): + if args.resume: + cfg_path = os.path.join(args.experiment_path, 'config.yaml') + if not os.path.exists(cfg_path): + print_log("Failed to resume", logger = logger) + raise FileNotFoundError() + print_log(f'Resume yaml from {cfg_path}', logger = logger) + args.config = cfg_path + config = cfg_from_yaml_file(args.config) + if not args.resume and args.local_rank == 0: + save_experiment_config(args, config, logger) + return config + +def save_experiment_config(args, config, logger = None): + config_path = os.path.join(args.experiment_path, 'config.yaml') + os.system('cp %s %s' % (args.config, config_path)) + print_log(f'Copy the Config file from {args.config} to {config_path}',logger = logger ) \ No newline at end of file diff --git a/utils/data_transform.py b/utils/data_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa5d24f775af0d854dc9981604360412ee0dcbd --- /dev/null +++ b/utils/data_transform.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python3 +# Portions Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import logging +import math + +import torch +import torch.nn as nn +import torchaudio +from PIL import Image +from pytorchvideo import transforms as pv_transforms +from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler +from pytorchvideo.data.encoded_video import EncodedVideo +from torchvision import transforms +from torchvision.transforms._transforms_video import NormalizeVideo + +from ..model.ImageBind.models.multimodal_preprocessors import SimpleTokenizer + +DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds + +BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz" + + +def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): + # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 + waveform -= waveform.mean() + fbank = torchaudio.compliance.kaldi.fbank( + waveform, + htk_compat=True, + sample_frequency=sample_rate, + use_energy=False, + window_type="hanning", + num_mel_bins=num_mel_bins, + dither=0.0, + frame_length=25, + frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, + ) + # Convert to [mel_bins, num_frames] shape + fbank = fbank.transpose(0, 1) + # Pad to target_length + n_frames = fbank.size(1) + p = target_length - n_frames + # if p is too large (say >20%), flash a warning + if abs(p) / n_frames > 0.2: + logging.warning( + "Large gap between audio n_frames(%d) and " + "target_length (%d). Is the audio_target_length " + "setting correct?", + n_frames, + target_length, + ) + # cut and pad + if p > 0: + fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) + elif p < 0: + fbank = fbank[:, 0:target_length] + # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 + # channel image + fbank = fbank.unsqueeze(0) + return fbank + + +def get_clip_timepoints(clip_sampler, duration): + # Read out all clips in this video + all_clips_timepoints = [] + is_last_clip = False + end = 0.0 + while not is_last_clip: + start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) + all_clips_timepoints.append((start, end)) + return all_clips_timepoints + + + +def load_and_transform_vision_data(image_paths, device): + if image_paths is None: + return None + + image_ouputs = [] + for image_path in image_paths: + data_transform = transforms.Compose( + [ + transforms.Resize( + 224, interpolation=transforms.InterpolationMode.BICUBIC + ), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + with open(image_path, "rb") as fopen: + image = Image.open(fopen).convert("RGB") + + image = data_transform(image) + image_ouputs.append(image) + return torch.stack(image_ouputs, dim=0) + + +def load_and_transform_text(text, device): + if text is None: + return None + tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) + tokens = [tokenizer(t).unsqueeze(0) for t in text] + tokens = torch.cat(tokens, dim=0) + return tokens + + +def load_and_transform_audio_data( + audio_paths, + device, + num_mel_bins=128, + target_length=204, + sample_rate=16000, + clip_duration=2, + clips_per_video=3, + mean=-4.268, + std=9.138, +): + if audio_paths is None: + return None + + audio_outputs = [] + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + + for audio_path in audio_paths: + waveform, sr = torchaudio.load(audio_path) + if sample_rate != sr: + waveform = torchaudio.functional.resample( + waveform, orig_freq=sr, 
new_freq=sample_rate + ) + all_clips_timepoints = get_clip_timepoints( + clip_sampler, waveform.size(1) / sample_rate + ) + all_clips = [] + for clip_timepoints in all_clips_timepoints: + waveform_clip = waveform[ + :, + int(clip_timepoints[0] * sample_rate) : int( + clip_timepoints[1] * sample_rate + ), + ] + waveform_melspec = waveform2melspec( + waveform_clip, sample_rate, num_mel_bins, target_length + ) + all_clips.append(waveform_melspec) + + normalize = transforms.Normalize(mean=mean, std=std) + all_clips = [normalize(ac) for ac in all_clips] + + all_clips = torch.stack(all_clips, dim=0) + audio_outputs.append(all_clips) + + return torch.stack(audio_outputs, dim=0) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Perform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to perform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): + """ + Perform uniform spatial sampling on the images and corresponding boxes. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + scale_size (int): optinal. If not None, resize the images to scale_size before + performing any crop. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] + cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +class SpatialCrop(nn.Module): + """ + Convert the video into 3 smaller clips spatially. Must be used after the + temporal crops to get spatial crops, and should be used with + -2 in the spatial crop at the slowfast augmentation stage (so full + frames are passed in here). 
Will return a larger list with the + 3x spatial crops as well. + """ + + def __init__(self, crop_size: int = 224, num_crops: int = 3): + super().__init__() + self.crop_size = crop_size + if num_crops == 3: + self.crops_to_ext = [0, 1, 2] + self.flipped_crops_to_ext = [] + elif num_crops == 1: + self.crops_to_ext = [1] + self.flipped_crops_to_ext = [] + else: + raise NotImplementedError("Nothing else supported yet") + + def forward(self, videos): + """ + Args: + videos: A list of C, T, H, W videos. + Returns: + videos: A list with 3x the number of elements. Each video converted + to C, T, H', W' by spatial cropping. + """ + assert isinstance(videos, list), "Must be a list of videos after temporal crops" + assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" + res = [] + for video in videos: + for spatial_idx in self.crops_to_ext: + res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) + if not self.flipped_crops_to_ext: + continue + flipped_video = transforms.functional.hflip(video) + for spatial_idx in self.flipped_crops_to_ext: + res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) + return res + + +def load_and_transform_video_data( + video_paths, + device, + clip_duration=2, + clips_per_video=5, + sample_rate=16000, +): + if video_paths is None: + return None + + video_outputs = [] + video_transform = transforms.Compose( + [ + pv_transforms.ShortSideScale(224), + NormalizeVideo( + mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711), + ), + ] + ) + + clip_sampler = ConstantClipsPerVideoSampler( + clip_duration=clip_duration, clips_per_video=clips_per_video + ) + frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) + + for video_path in video_paths: + video = EncodedVideo.from_path( + video_path, + decoder="decord", + decode_audio=False, + **{"sample_rate": sample_rate}, + ) + + all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) + + all_video = [] + for clip_timepoints in all_clips_timepoints: + # Read the clip, get frames + clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) + if clip is None: + raise ValueError("No clip found") + video_clip = frame_sampler(clip["video"]) + video_clip = video_clip / 255.0 # since this is float, need 0-1 + + all_video.append(video_clip) + + all_video = [video_transform(clip) for clip in all_video] + all_video = SpatialCrop(224, num_crops=3)(all_video) + + all_video = torch.stack(all_video, dim=0) + video_outputs.append(all_video) + + return torch.stack(video_outputs, dim=0) diff --git a/utils/io.py b/utils/io.py new file mode 100644 index 0000000000000000000000000000000000000000..d0edd1dd450d18981c545a9cb7460184186d6708 --- /dev/null +++ b/utils/io.py @@ -0,0 +1,42 @@ +import h5py +import numpy as np +import open3d +import os + +class IO: + @classmethod + def get(cls, file_path): + _, file_extension = os.path.splitext(file_path) + + if file_extension in ['.npy']: + return cls._read_npy(file_path) + elif file_extension in ['.pcd']: + return cls._read_pcd(file_path) + elif file_extension in ['.h5']: + return cls._read_h5(file_path) + elif file_extension in ['.txt']: + return cls._read_txt(file_path) + else: + raise Exception('Unsupported file extension: %s' % file_extension) + + # References: https://github.com/numpy/numpy/blob/master/numpy/lib/format.py + @classmethod + def _read_npy(cls, file_path): + return np.load(file_path) + + # References: https://github.com/dimatura/pypcd/blob/master/pypcd/pypcd.py#L275 + # 
Support PCD files without compression ONLY! + @classmethod + def _read_pcd(cls, file_path): + pc = open3d.io.read_point_cloud(file_path) + ptcloud = np.array(pc.points) + return ptcloud + + @classmethod + def _read_txt(cls, file_path): + return np.loadtxt(file_path) + + @classmethod + def _read_h5(cls, file_path): + f = h5py.File(file_path, 'r') + return f['data'][()] \ No newline at end of file diff --git a/utils/logger.py b/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..847c1c7a2f50f310cd5daf96b928838c1c293525 --- /dev/null +++ b/utils/logger.py @@ -0,0 +1,127 @@ +import logging +import torch.distributed as dist + +logger_initialized = {} + +def get_root_logger(log_file=None, log_level=logging.INFO, name='main'): + """Get root logger and add a keyword filter to it. + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmdet3d". + Args: + log_file (str, optional): File path of log. Defaults to None. + log_level (int, optional): The level of logger. + Defaults to logging.INFO. + name (str, optional): The name of the root logger, also used as a + filter keyword. Defaults to 'mmdet3d'. + Returns: + :obj:`logging.Logger`: The obtained logger + """ + logger = get_logger(name=name, log_file=log_file, log_level=log_level) + # add a logging filter + logging_filter = logging.Filter(name) + logging_filter.filter = lambda record: record.find(name) != -1 + + return logger + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + # handle hierarchical names + # e.g., logger "a" is initialized, then logger "a.b" will skip the + # initialization since it is a child of "a". + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. + # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. 
+ for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + else: + rank = 0 + + # only rank 0 will add a FileHandler + if rank == 0 and log_file is not None: + # Here, the default behaviour of the official logger is 'a'. Thus, we + # provide an interface to change the file mode to the default + # behaviour. + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + + logger_initialized[name] = True + + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == 'silent': + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + 'logger should be either a logging.Logger object, str, ' + f'"silent" or None, but got {type(logger)}') \ No newline at end of file diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..104c80995dc5216ce5cfa1aa44fe570551555c2a --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,117 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from math import exp + +class FocalLoss(nn.Module): + """ + copy from: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py + This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in + 'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)' + Focal_Loss= -1*alpha*(1-pt)*log(pt) + :param alpha: (tensor) 3D or 4D the scalar factor for this criterion + :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more + focus on hard misclassified example + :param smooth: (float,double) smooth value when cross entropy + :param balance_index: (int) balance class index, should be specific when alpha is float + :param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch. 
+ """ + + def __init__(self, apply_nonlin=None, alpha=None, gamma=2, balance_index=0, smooth=1e-5, size_average=True): + super(FocalLoss, self).__init__() + self.apply_nonlin = apply_nonlin + self.alpha = alpha + self.gamma = gamma + self.balance_index = balance_index + self.smooth = smooth + self.size_average = size_average + + if self.smooth is not None: + if self.smooth < 0 or self.smooth > 1.0: + raise ValueError('smooth value should be in [0,1]') + + def forward(self, logit, target): + # logit: [B, 2, 224, 224] + # target: [B, 1, 224, 224] + if self.apply_nonlin is not None: + logit = self.apply_nonlin(logit) + # number of classes (2 here) + num_class = logit.shape[1] + + if logit.dim() > 2: + # N,C,d1,d2 -> N,C,m (m=d1*d2*...) + # [B, 2, 224*224] + logit = logit.view(logit.size(0), logit.size(1), -1) + # [B, 224*224, 2] + logit = logit.permute(0, 2, 1).contiguous() + # [B*224*224, 2] + logit = logit.view(-1, logit.size(-1)) + target = torch.squeeze(target, 1) + # [B*224*224, 1] + target = target.view(-1, 1) + alpha = self.alpha + + if alpha is None: + alpha = torch.ones(num_class, 1) + elif isinstance(alpha, (list, np.ndarray)): + assert len(alpha) == num_class + alpha = torch.FloatTensor(alpha).view(num_class, 1) + alpha = alpha / alpha.sum() + elif isinstance(alpha, float): + alpha = torch.ones(num_class, 1) + alpha = alpha * (1 - self.alpha) + alpha[self.balance_index] = self.alpha + + else: + raise TypeError('Unsupported alpha type') + + if alpha.device != logit.device: + alpha = alpha.to(logit.device) + + # [B*224*224, 1] + idx = target.cpu().long() + + # [B*224*224, 2] + one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_() + + one_hot_key = one_hot_key.scatter_(1, idx, 1) + if one_hot_key.device != logit.device: + one_hot_key = one_hot_key.to(logit.device) + + if self.smooth: + one_hot_key = torch.clamp( + one_hot_key, self.smooth / (num_class - 1), 1.0 - self.smooth) + pt = (one_hot_key * logit).sum(1) + self.smooth + logpt = pt.log() + + gamma = self.gamma + + alpha = alpha[idx] + alpha = torch.squeeze(alpha) + loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt + + if self.size_average: + loss = loss.mean() + return loss + + +class BinaryDiceLoss(nn.Module): + def __init__(self): + super(BinaryDiceLoss, self).__init__() + + def forward(self, input, targets): + # batch size N + N = targets.size()[0] + # smoothing term to avoid division by zero + smooth = 1 + # flatten the spatial dimensions + input_flat = input.view(N, -1) + targets_flat = targets.view(N, -1) + + # compute the intersection + intersection = input_flat * targets_flat + N_dice_eff = (2 * intersection.sum(1) + smooth) / (input_flat.sum(1) + targets_flat.sum(1) + smooth) + # average the per-image Dice loss over the batch + loss = 1 - N_dice_eff.sum() / N + return loss \ No newline at end of file diff --git a/utils/registry.py b/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..60c4dc43f54ab21b8d8ef813bfdb175d9efb4992 --- /dev/null +++ b/utils/registry.py @@ -0,0 +1,288 @@ +import inspect +import warnings +from functools import partial +from . import config + +class Registry: + """A registry to map strings to classes. + Registered object could be built from registry. + Example: + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + >>> resnet = MODELS.build(dict(NAME='ResNet')) + Please refer to https://mmcv.readthedocs.io/en/latest/registry.html for + advanced usage. + Args: + name (str): Registry name.
+ build_func(func, optional): Build function to construct instance from + Registry, func:`build_from_cfg` is used if neither ``parent`` or + ``build_func`` is specified. If ``parent`` is specified and + ``build_func`` is not given, ``build_func`` will be inherited + from ``parent``. Default: None. + parent (Registry, optional): Parent registry. The class registered in + children registry could be built from parent. Default: None. + scope (str, optional): The scope of registry. It is the key to search + for children registry. If not specified, scope will be the name of + the package where class is defined, e.g. mmdet, mmcls, mmseg. + Default: None. + """ + + def __init__(self, name, build_func=None, parent=None, scope=None): + self._name = name + self._module_dict = dict() + self._children = dict() + self._scope = self.infer_scope() if scope is None else scope + + # self.build_func will be set with the following priority: + # 1. build_func + # 2. parent.build_func + # 3. build_from_cfg + if build_func is None: + if parent is not None: + self.build_func = parent.build_func + else: + self.build_func = build_from_cfg + else: + self.build_func = build_func + if parent is not None: + assert isinstance(parent, Registry) + parent._add_children(self) + self.parent = parent + else: + self.parent = None + + def __len__(self): + return len(self._module_dict) + + def __contains__(self, key): + return self.get(key) is not None + + def __repr__(self): + format_str = self.__class__.__name__ + \ + f'(name={self._name}, ' \ + f'items={self._module_dict})' + return format_str + + @staticmethod + def infer_scope(): + """Infer the scope of registry. + The name of the package where registry is defined will be returned. + Example: + # in mmdet/models/backbone/resnet.py + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + The scope of ``ResNet`` will be ``mmdet``. + Returns: + scope (str): The inferred scope name. + """ + # inspect.stack() trace where this function is called, the index-2 + # indicates the frame where `infer_scope()` is called + filename = inspect.getmodule(inspect.stack()[2][0]).__name__ + split_filename = filename.split('.') + return split_filename[0] + + @staticmethod + def split_scope_key(key): + """Split scope and key. + The first scope will be split from key. + Examples: + >>> Registry.split_scope_key('mmdet.ResNet') + 'mmdet', 'ResNet' + >>> Registry.split_scope_key('ResNet') + None, 'ResNet' + Return: + scope (str, None): The first scope. + key (str): The remaining key. + """ + split_index = key.find('.') + if split_index != -1: + return key[:split_index], key[split_index + 1:] + else: + return None, key + + @property + def name(self): + return self._name + + @property + def scope(self): + return self._scope + + @property + def module_dict(self): + return self._module_dict + + @property + def children(self): + return self._children + + def get(self, key): + """Get the registry record. + Args: + key (str): The class name in string format. + Returns: + class: The corresponding class. 
+ """ + scope, real_key = self.split_scope_key(key) + if scope is None or scope == self._scope: + # get from self + if real_key in self._module_dict: + return self._module_dict[real_key] + else: + # get from self._children + if scope in self._children: + return self._children[scope].get(real_key) + else: + # goto root + parent = self.parent + while parent.parent is not None: + parent = parent.parent + return parent.get(key) + + def build(self, *args, **kwargs): + return self.build_func(*args, **kwargs, registry=self) + + def _add_children(self, registry): + """Add children for a registry. + The ``registry`` will be added as children based on its scope. + The parent registry could build objects from children registry. + Example: + >>> models = Registry('models') + >>> mmdet_models = Registry('models', parent=models) + >>> @mmdet_models.register_module() + >>> class ResNet: + >>> pass + >>> resnet = models.build(dict(NAME='mmdet.ResNet')) + """ + + assert isinstance(registry, Registry) + assert registry.scope is not None + assert registry.scope not in self.children, \ + f'scope {registry.scope} exists in {self.name} registry' + self.children[registry.scope] = registry + + def _register_module(self, module_class, module_name=None, force=False): + if not inspect.isclass(module_class): + raise TypeError('module must be a class, ' + f'but got {type(module_class)}') + + if module_name is None: + module_name = module_class.__name__ + if isinstance(module_name, str): + module_name = [module_name] + for name in module_name: + if not force and name in self._module_dict: + raise KeyError(f'{name} is already registered ' + f'in {self.name}') + self._module_dict[name] = module_class + + def deprecated_register_module(self, cls=None, force=False): + warnings.warn( + 'The old API of register_module(module, force=False) ' + 'is deprecated and will be removed, please use the new API ' + 'register_module(name=None, force=False, module=None) instead.') + if cls is None: + return partial(self.deprecated_register_module, force=force) + self._register_module(cls, force=force) + return cls + + def register_module(self, name=None, force=False, module=None): + """Register a module. + A record will be added to `self._module_dict`, whose key is the class + name or the specified name, and value is the class itself. + It can be used as a decorator or a normal function. + Example: + >>> backbones = Registry('backbone') + >>> @backbones.register_module() + >>> class ResNet: + >>> pass + >>> backbones = Registry('backbone') + >>> @backbones.register_module(name='mnet') + >>> class MobileNet: + >>> pass + >>> backbones = Registry('backbone') + >>> class ResNet: + >>> pass + >>> backbones.register_module(ResNet) + Args: + name (str | None): The module name to be registered. If not + specified, the class name will be used. + force (bool, optional): Whether to override an existing class with + the same name. Default: False. + module (type): Module class to be registered. + """ + if not isinstance(force, bool): + raise TypeError(f'force must be a boolean, but got {type(force)}') + # NOTE: This is a walkaround to be compatible with the old api, + # while it may introduce unexpected bugs. 
+ if isinstance(name, type): + return self.deprecated_register_module(name, force=force) + + # raise the error ahead of time + if not (name is None or isinstance(name, str) or misc.is_seq_of(name, str)): + raise TypeError( + 'name must be either of None, an instance of str or a sequence' + f' of str, but got {type(name)}') + + # use it as a normal method: x.register_module(module=SomeClass) + if module is not None: + self._register_module( + module_class=module, module_name=name, force=force) + return module + + # use it as a decorator: @x.register_module() + def _register(cls): + self._register_module( + module_class=cls, module_name=name, force=force) + return cls + + return _register + + +def build_from_cfg(cfg, registry, default_args=None): + """Build a module from config dict. + Args: + cfg (edict): Config dict. It should at least contain the key "NAME". + registry (:obj:`Registry`): The registry to search the type from. + Returns: + object: The constructed object. + """ + if not isinstance(cfg, dict): + raise TypeError(f'cfg must be a dict, but got {type(cfg)}') + if 'NAME' not in cfg: + if default_args is None or 'NAME' not in default_args: + raise KeyError( + '`cfg` or `default_args` must contain the key "NAME", ' + f'but got {cfg}\n{default_args}') + if not isinstance(registry, Registry): + raise TypeError('registry must be an mmcv.Registry object, ' + f'but got {type(registry)}') + + if not (isinstance(default_args, dict) or default_args is None): + raise TypeError('default_args must be a dict or None, ' + f'but got {type(default_args)}') + + if default_args is not None: + cfg = config.merge_new_config(cfg, default_args) + + obj_type = cfg.get('NAME') + + if isinstance(obj_type, str): + obj_cls = registry.get(obj_type) + if obj_cls is None: + raise KeyError( + f'{obj_type} is not in the {registry.name} registry') + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + try: + return obj_cls(cfg) + except Exception as e: + # Normal TypeError does not print class name. 
+ raise type(e)(f'{obj_cls.__name__}: {e}') \ No newline at end of file diff --git a/utils/utils.py b/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8ee2144bdb6481fcd028bbda410d33778013864b --- /dev/null +++ b/utils/utils.py @@ -0,0 +1,242 @@ +import numpy as np +import os +import random +import shutil +import torch +import torch.distributed as dist +import torch.autograd as autograd + +from PIL import ImageFilter +from easydict import EasyDict +import yaml +# from datas.dataset_3d import Dataset_3D + +def merge_new_config(config, new_config): + for key, val in new_config.items(): + if not isinstance(val, dict): + if key == '_base_': + with open(new_config['_base_'], 'r') as f: + try: + val = yaml.load(f, Loader=yaml.FullLoader) + except: + val = yaml.load(f) + config[key] = EasyDict() + merge_new_config(config[key], val) + else: + config[key] = val + continue + if key not in config: + config[key] = EasyDict() + merge_new_config(config[key], val) + return config +def cfg_from_yaml_file(cfg_file): + config = EasyDict() + with open(cfg_file, 'r') as f: + # try: + new_config = yaml.load(f, Loader=yaml.FullLoader) + # except: + # new_config = yaml.load(f) + merge_new_config(config=config, new_config=new_config) + return config + +def get_model(model): + if isinstance(model, torch.nn.DataParallel) \ + or isinstance(model, torch.nn.parallel.DistributedDataParallel): + return model.module + else: + return model + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(state, is_best, output_dir): + if is_main_process(): + ckpt_path = '{}/checkpoint_{}.pt'.format(output_dir, state['epoch']) + best_path = f'{output_dir}/checkpoint_best.pt' + torch.save(state, ckpt_path) + if is_best: + shutil.copyfile(ckpt_path, best_path) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +def scaled_all_reduce(tensors, is_scale=True): + """Performs the scaled all_reduce operation on the provided tensors. + The input tensors are modified in-place. 
Currently supports only the sum + reduction operator. The reduced values are scaled by the inverse size of the + world size. + """ + world_size = get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + # Queue the reductions + reductions = [] + for tensor in tensors: + reduction = dist.all_reduce(tensor, async_op=True) + reductions.append(reduction) + # Wait for reductions to finish + for reduction in reductions: + reduction.wait() + # Scale the results + if is_scale: + for tensor in tensors: + tensor.mul_(1.0 / world_size) + return tensors + + +def all_gather_batch(tensors): + """ + Performs all_gather operation on the provided tensors. + """ + # Queue the gathered tensors + world_size = get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + tensor_list = [] + output_tensor = [] + for tensor in tensors: + tensor_all = [torch.ones_like(tensor) for _ in range(world_size)] + dist.all_gather( + tensor_all, + tensor, + async_op=False # performance opt + ) + + tensor_list.append(tensor_all) + + for tensor_all in tensor_list: + output_tensor.append(torch.cat(tensor_all, dim=0)) + return output_tensor + + +class GatherLayer(autograd.Function): + """ + Gather tensors from all workers with support for backward propagation: + This implementation does not cut the gradients as torch.distributed.all_gather does. + """ + + @staticmethod + def forward(ctx, x): + output = [torch.zeros_like(x) for _ in range(dist.get_world_size())] + dist.all_gather(output, x) + return tuple(output) + + @staticmethod + def backward(ctx, *grads): + all_gradients = torch.stack(grads) + dist.all_reduce(all_gradients) + return all_gradients[dist.get_rank()] + + +def all_gather_batch_with_grad(tensors): + """ + Performs all_gather operation on the provided tensors. + Graph remains connected for backward grad computation. + """ + # Queue the gathered tensors + world_size = get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + tensor_list = [] + output_tensor = [] + + for tensor in tensors: + tensor_all = GatherLayer.apply(tensor) + tensor_list.append(tensor_all) + + for tensor_all in tensor_list: + output_tensor.append(torch.cat(tensor_all, dim=0)) + return output_tensor + + +def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * niter_per_ep + if warmup_epochs > 0: + warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) + + iters = np.arange(epochs * niter_per_ep - warmup_iters) + schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters))) + + schedule = np.concatenate((warmup_schedule, schedule)) + assert len(schedule) == epochs * niter_per_ep + return schedule + + +class GaussianBlur(object): + """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709""" + + def __init__(self, sigma=[.1, 2.]): + self.sigma = sigma + + def __call__(self, x): + sigma = random.uniform(self.sigma[0], self.sigma[1]) + x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) + return x + +# def get_dataset(train_transform, tokenizer, args, dataset_name=None): +# dataset_3d = Dataset_3D(args, tokenizer, dataset_name, train_transform) +# return dataset_3d.dataset \ No newline at end of file
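
For reference, here is a minimal usage sketch of the Registry / build_from_cfg pair introduced above. The import path and the ToyModel class are hypothetical; the one behavior taken directly from the code above is that build_from_cfg passes the whole config dict to the registered class's constructor, so registered classes are expected to accept a single cfg argument.

    from easydict import EasyDict
    from utils.registry import Registry  # adjust to wherever Registry is defined in this repo

    MODELS = Registry('models')

    @MODELS.register_module()            # registered under the class name 'ToyModel'
    class ToyModel:
        def __init__(self, cfg):         # build_from_cfg calls obj_cls(cfg)
            self.hidden_dim = cfg.get('hidden_dim', 128)

    cfg = EasyDict(NAME='ToyModel', hidden_dim=256)   # 'NAME' selects the registered class
    model = MODELS.build(cfg)            # dispatches to build_from_cfg(cfg, registry=MODELS)
    assert isinstance(model, ToyModel) and model.hidden_dim == 256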
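The cosine_scheduler helper in utils/utils.py returns one value per training iteration (linear warmup followed by a cosine decay), so it is indexed by the global step rather than by the epoch. A small sketch with illustrative hyperparameters:

    from utils.utils import cosine_scheduler

    epochs, niter_per_ep = 10, 100
    lr_schedule = cosine_scheduler(
        base_value=1e-3,     # peak value reached right after warmup
        final_value=1e-5,    # value at the last iteration
        epochs=epochs,
        niter_per_ep=niter_per_ep,
        warmup_epochs=1,     # first epoch ramps linearly from 0 to base_value
    )
    assert len(lr_schedule) == epochs * niter_per_ep

    # Typical use inside a training loop (optimizer is any torch optimizer):
    # for it in range(epochs * niter_per_ep):
    #     for group in optimizer.param_groups:
    #         group['lr'] = lr_schedule[it]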
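GatherLayer and all_gather_batch_with_grad exist because torch.distributed.all_gather detaches the gathered tensors from the autograd graph, which breaks losses (for example contrastive losses) that need gradients to flow back through every worker's features. The sketch below is purely illustrative, not code from this repository: the loss and feature names are invented, and it also runs on a single process because the helpers are no-ops when the world size is 1.

    import torch
    import torch.nn.functional as F
    from utils.utils import all_gather_batch_with_grad, get_rank

    def toy_info_nce(image_feats, text_feats, temperature=0.07):
        # Normalize local features, then gather the full global batch while
        # keeping the autograd graph connected (GatherLayer under the hood).
        image_feats = F.normalize(image_feats, dim=-1)
        text_feats = F.normalize(text_feats, dim=-1)
        all_image, all_text = all_gather_batch_with_grad([image_feats, text_feats])

        # Gathered tensors are concatenated in rank order, so the positive for
        # local sample j sits at index rank * local_batch + j.
        local_bs = image_feats.shape[0]
        targets = get_rank() * local_bs + torch.arange(local_bs, device=image_feats.device)

        loss_i = F.cross_entropy(image_feats @ all_text.t() / temperature, targets)
        loss_t = F.cross_entropy(text_feats @ all_image.t() / temperature, targets)
        return 0.5 * (loss_i + loss_t)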