Add print statements

modeling_cogvlm.py  +25 -25
@@ -440,29 +440,29 @@ class CogVLMModel(CogVLMPreTrainedModel):
 
 from huggingface_hub import HfApi
 
-
-
-
+torch.save(images_features, "images_features.pt")
+torch.save(inputs_embeds, "inputs_embeds.pt")
+torch.save(token_type_ids, "token_type_ids.pt")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+api = HfApi()
+api.upload_file(
+    path_or_fileobj="images_features.pt",
+    path_in_repo="images_features.pt",
+    repo_id="nielsr/test-cogvlm",
+    repo_type="dataset",
+)
+api.upload_file(
+    path_or_fileobj="inputs_embeds.pt",
+    path_in_repo="inputs_embeds.pt",
+    repo_id="nielsr/test-cogvlm",
+    repo_type="dataset",
+)
+api.upload_file(
+    path_or_fileobj="token_type_ids.pt",
+    path_in_repo="token_type_ids.pt",
+    repo_id="nielsr/test-cogvlm",
+    repo_type="dataset",
+)
 
 print("First values of text embeddings:", inputs_embeds[0, :3, :3])
 print("First values of images_features:", images_features[0, :3])

@@ -557,14 +557,14 @@ class CogVLMModel(CogVLMPreTrainedModel):
 
 hidden_states = inputs_embeds
 
-torch.save(hidden_states, "
+torch.save(hidden_states, "initial_hidden_states.pt")
 
 from huggingface_hub import HfApi
 
 api = HfApi()
 api.upload_file(
-    path_or_fileobj="
-    path_in_repo="
+    path_or_fileobj="initial_hidden_states.pt",
+    path_in_repo="initial_hidden_states.pt",
     repo_id="nielsr/test-cogvlm",
     repo_type="dataset",
 )
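On the consuming side, the tensors uploaded above can be pulled back from the nielsr/test-cogvlm dataset repo and spot-checked against another implementation's values. Below is a minimal sketch of that check, assuming the repo is still accessible; the comparison file "reference_inputs_embeds.pt" is a hypothetical local placeholder, not something this commit produces.

import torch
from huggingface_hub import hf_hub_download

REPO_ID = "nielsr/test-cogvlm"

def fetch_tensor(filename):
    # Download (or reuse the cached copy of) one of the uploaded .pt files
    path = hf_hub_download(repo_id=REPO_ID, filename=filename, repo_type="dataset")
    return torch.load(path, map_location="cpu")

inputs_embeds = fetch_tensor("inputs_embeds.pt")
images_features = fetch_tensor("images_features.pt")
hidden_states = fetch_tensor("initial_hidden_states.pt")

# Same spot checks as the print statements added in the diff
print("First values of text embeddings:", inputs_embeds[0, :3, :3])
print("First values of images_features:", images_features[0, :3])

# Element-wise comparison against a reference implementation's tensor
# ("reference_inputs_embeds.pt" is a hypothetical local file)
reference = torch.load("reference_inputs_embeds.pt", map_location="cpu")
print("inputs_embeds close to reference:",
      torch.allclose(inputs_embeds.float(), reference.float(), atol=1e-4))

Loading with map_location="cpu" and casting both sides to float keeps the comparison independent of the device and dtype the tensors were saved in.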