{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "from transformers import pipeline, Conversation\n", "from transformers import AutoTokenizer, AutoModelForCausalLM" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# Loading\n", "tok = AutoTokenizer.from_pretrained(\"saved_model\")\n", "mod = AutoModelForCausalLM.from_pretrained(\"saved_model\")\n", "\n", "chatbot = pipeline(\"conversational\", model = mod, tokenizer = tok)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n", "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n", "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n", "A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.\n" ] }, { "data": { "text/plain": [ "Conversation id: 20e0c3eb-e549-4c61-96d5-831eb3af1933 \n", "user >> Hello \n", "bot >> Hi, I'm here to talk to you. \n", "user >> How are you? \n", "bot >> I'm fine. How are you? \n", "user >> I'm good, do you want to watch a movie today? \n", "bot >> Sure, I'll watch it. What movie? \n", "user >> What about Lalaland? \n", "bot >> That's a good one. I'll watch it. " ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "user_input = \"Hello\"\n", "conversation = Conversation(user_input)\n", "conversation = chatbot(conversation, pad_token_id=chatbot.tokenizer.eos_token_id)\n", "reply = conversation.generated_responses\n", "reply = reply[0].split(\" \")[0]\n", "conversation.generated_responses = [reply]\n", "\n", "conversation.add_user_input(\"How are you?\")\n", "conversation = chatbot(conversation, pad_token_id=chatbot.tokenizer.eos_token_id)\n", "conversation.add_user_input(\"I'm good, do you want to watch a movie today?\")\n", "conversation = chatbot(conversation, pad_token_id=chatbot.tokenizer.eos_token_id)\n", "conversation.add_user_input(\"What about Lalaland?\")\n", "conversation = chatbot(conversation, pad_token_id=chatbot.tokenizer.eos_token_id)\n", "\n", "conversation" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }