KeyError: 'mistral'
import pandas as pd
import torch  # was missing: torch.cuda.is_available() is used below
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model directly from the Hugging Face Hub.
# NOTE(review): zephyr-7b-alpha is a Mistral-based model; loading it requires
# transformers >= 4.34 — older versions raise KeyError: 'mistral'.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")

# If you have a GPU available, use it for faster processing
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Process Titles: Load the dataset and extract all video titles
dataset_path = "/kaggle/input/youtube-watching/video_metadata.csv"
video_data = pd.read_csv(dataset_path)
video_titles = video_data['title'].tolist()

enhanced_titles = []

# Generate a sampled continuation for each title and collect the decoded text.
for title in video_titles:
    model_inputs = tokenizer([title], return_tensors="pt").to(device)
    generated_ids = model.generate(**model_inputs, max_length=100, do_sample=True)
    generated_text = tokenizer.batch_decode(generated_ids)[0]
    enhanced_titles.append(generated_text)

# Print or save the enhanced titles as needed
print(enhanced_titles)
Downloading (…)okenizer_config.json: 100%
1.43k/1.43k [00:00<00:00, 124kB/s]
Downloading tokenizer.model: 100%
493k/493k [00:00<00:00, 8.68MB/s]
Downloading (…)/main/tokenizer.json: 100%
1.80M/1.80M [00:00<00:00, 17.2MB/s]
Downloading (…)in/added_tokens.json: 100%
42.0/42.0 [00:00<00:00, 3.88kB/s]
Downloading (…)cial_tokens_map.json: 100%
168/168 [00:00<00:00, 15.6kB/s]
Downloading (…)lve/main/config.json: 100%
639/639 [00:00<00:00, 58.5kB/s]
KeyError Traceback (most recent call last)
Cell In[10], line 6
4 # Load model direc
5 tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
----> 6 model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
8 # If you have a GPU available, use it for faster processing
9 device = "cuda" if torch.cuda.is_available() else "cpu"
File /opt/conda/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py:527, in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
522 if kwargs.get("quantization_config", None) is not None:
523 _ = kwargs.pop("quantization_config")
525 config, kwargs = AutoConfig.from_pretrained(
526 pretrained_model_name_or_path,
--> 527 return_unused_kwargs=True,
528 trust_remote_code=trust_remote_code,
529 code_revision=code_revision,
530 _commit_hash=commit_hash,
531 **hub_kwargs,
532 **kwargs,
533 )
535 # if torch_dtype=auto was passed here, ensure to pass it on
536 if kwargs_orig.get("torch_dtype", None) == "auto":
File /opt/conda/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py:1041, in from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
1036 has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING
1037 trust_remote_code = resolve_trust_remote_code(
1038 trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
1039 )
-> 1041 if has_remote_code and trust_remote_code:
1042 class_ref = config_dict["auto_map"]["AutoConfig"]
1043 config_class = get_class_from_dynamic_module(
1044 class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
1045 )
File /opt/conda/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py:734, in getitem(self, key)
730 return key
731 return None
--> 734 class _LazyConfigMapping(OrderedDict):
735 """
736 A dictionary that lazily load its values when they are requested.
737 """
739 def init(self, mapping):
KeyError: 'mistral'
I am also getting the same error.
You need the latest version of transformers
to load this model, so I think if you run pip install -U transformers
the error should be resolved. Please let me know if the error persists though!
I have the latest version of transformers, yet I am still getting the KeyError: 'mistral'. I am trying to run it on an Oracle Linux server.
Traceback (most recent call last):
File "load_model_and_generate.py", line 6, in
model = AutoModelForCausalLM.from_pretrained("./zephyr-7b-alpha")
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/auto_factory.py", line 424, in from_pretrained
pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/configuration_auto.py", line 672, in from_pretrained
config_class = CONFIG_MAPPING[config_dict["model_type"]]
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/configuration_auto.py", line 387, in getitem
raise KeyError(key)
KeyError: 'mistral'
I have the latest version of transformers, yet I am still getting the KeyError: 'mistral'. I am trying to run it on an Oracle Linux server.
Traceback (most recent call last):
File "load_model_and_generate.py", line 6, in
model = AutoModelForCausalLM.from_pretrained("./zephyr-7b-alpha")
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/auto_factory.py", line 424, in from_pretrained
pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/configuration_auto.py", line 672, in from_pretrained
config_class = CONFIG_MAPPING[config_dict["model_type"]]
File "/usr/local/lib/python3.6/site-packages/transformers/models/auto/configuration_auto.py", line 387, in getitem
raise KeyError(key)
KeyError: 'mistral'
Please print your transformers version — is it above 4.33?
It is working for me using transformers version 4.34.1, so you need to upgrade your transformers version:
!pip install --upgrade transformers accelerate