init commit
Files changed:
- .gitattributes +1 -0
- .gitignore +164 -0
- app.py +39 -0
- faiss_index/index.faiss +3 -0
- faiss_index/index.pkl +3 -0
- images/ai_assistant.png +0 -0
- images/logo.jpg +0 -0
- models/logo.jpg +0 -0
- notebooks/RAG_CLAUDE.ipynb +737 -0
- notebooks/logo.jpg +0 -0
- pages/RAG.py +123 -0
- pages/Summary.py +99 -0
- requirements.txt +14 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.faiss filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,164 @@
.DS_Store

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
app.py
ADDED
@@ -0,0 +1,39 @@
import streamlit as st
from PIL import Image

# Page title and configuration
st.set_page_config(page_title="AI Knowledge Assistant", page_icon="🤖", layout="wide")

# Create columns for the text and the image
col1, col2 = st.columns([2, 1])

# First column: main text
with col1:
    st.title("AI Knowledge Assistant 🤖")
    st.markdown("""
## Добро пожаловать в вашу персональную AI-систему для вопросов по машинному обучению!

Этот проект состоит из двух мощных инструментов:
- **Первая страница:** Система Retrieval-Augmented Generation (RAG), которая отвечает на вопросы, используя базу знаний, собранную из транскриптов YouTube-видео по машинному обучению.
- **Вторая страница:** Инструмент для создания текстовых саммари по видео. Просто введите ссылку на YouTube и выберите тип саммари, чтобы получить краткое содержание.

### Начните с первой страницы и получите ответы на вопросы по машинному обучению, или перейдите на вторую страницу для саммари видео!
""")

    # Button that leads to the first tool
    st.button("Перейти к системе RAG")

# Second column: illustration
with col2:
    image = Image.open('images/ai_assistant.png')  # load the illustration for a friendlier look
    st.image(image, caption="AI Knowledge Assistant", use_column_width=True)

# Divider line between the content and the footer
st.markdown("---")

# Small footer with project information
st.markdown("""
<div style='text-align: center; color: gray;'>
    <small>Этот проект создан для помощи в изучении машинного обучения, предоставляя ответы на основе видео и облегчая получение информации с YouTube. Разработано с любовью к AI и Data Science.</small>
</div>
""", unsafe_allow_html=True)
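The "Перейти к системе RAG" button above is rendered but not wired to any action. A minimal sketch of how it could jump to the RAG page, assuming a Streamlit release that provides st.switch_page and this repo's pages/ layout (not part of the committed file):

import streamlit as st

# Hypothetical wiring for the navigation button; st.switch_page expects the
# target script path relative to the main app file.
if st.button("Перейти к системе RAG"):
    st.switch_page("pages/RAG.py")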
faiss_index/index.faiss
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de459a6f0c348c619bb6047c8a971982b144613a4972378b4fb3fdc08b2aafca
size 60248109
faiss_index/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7304d015bef42ff434db790a358dd592bc6cd0c728364102a6c2fd09f431d19
size 16687971
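Both entries above are Git LFS pointer files: the FAISS index itself (about 60 MB) and the pickled docstore (about 16 MB) live in LFS, which is why this commit also adds the *.faiss rule to .gitattributes. A small sanity check one could run after git lfs pull, reusing the embedding model used elsewhere in the repo (a sketch, not part of the commit):

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Assumes the LFS pointers have been resolved into the real binaries.
embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-base")
vector_store = FAISS.load_local(
    "faiss_index", embeddings=embeddings, allow_dangerous_deserialization=True
)
print("Vectors in index:", vector_store.index.ntotal)  # ntotal comes from the raw faiss index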
images/ai_assistant.png
ADDED
images/logo.jpg
ADDED
models/logo.jpg
ADDED
notebooks/RAG_CLAUDE.ipynb
ADDED
@@ -0,0 +1,737 @@
[markdown cell]
## Installing the required libraries

[code cell 24]
!pip install faiss-cpu sentence-transformers langchain langchain-community anthropic youtube-transcript-api -q

[output] huggingface/tokenizers warning about the process being forked after parallelism was used (suggests setting TOKENIZERS_PARALLELISM), followed by a pip notice that 23.2.1 -> 24.2 is available.

[code cell 25]
pip install --upgrade google-api-python-client

[output] The same tokenizers warning, "Requirement already satisfied" lines for google-api-python-client and its dependencies (httplib2, google-auth, google-auth-httplib2, google-api-core, uritemplate, googleapis-common-protos, protobuf, proto-plus, requests, cachetools, pyasn1-modules, rsa, pyparsing, pyasn1, charset-normalizer, idna, urllib3, certifi), the pip upgrade notice, and a note that the kernel may need a restart to use updated packages.

[code cell]
# from langchain_community.embeddings import HuggingFaceEmbeddings
# from langchain_community.vectorstores import FAISS
# from langchain import Anthropic, LLMChain
# from langchain.chains.combine_documents import create_stuff_documents_chain
# from langchain.chains import create_retrieval_chain

[markdown cell]
## Building transcripts for the 3 playlists using the YouTube API

[code cell 26]
from youtube_transcript_api import YouTubeTranscriptApi
from googleapiclient.discovery import build

api_key = "Youtube_api"


def get_playlist_video_ids(playlist_id, api_key):
    youtube = build('youtube', 'v3', developerKey=api_key)

    video_ids = []
    next_page_token = None

    while True:
        # Fetch a page of videos from the playlist
        request = youtube.playlistItems().list(
            part="contentDetails",
            playlistId=playlist_id,
            maxResults=50,  # maximum number of videos returned per request
            pageToken=next_page_token
        )
        response = request.execute()

        # Collect the video IDs
        video_ids.extend([item['contentDetails']['videoId'] for item in response['items']])

        # pagination
        next_page_token = response.get('nextPageToken')

        if not next_page_token:
            break

    return video_ids

[code cell 27]
# check output
youtube = build('youtube', 'v3', developerKey=api_key)
request = youtube.playlistItems().list(
    part="contentDetails",
    playlistId='PLYSHtNPbAINnbqXjIbN-c7DorjCT6eYOQ',
    maxResults=50,  # maximum number of videos returned per request
    # pageToken=response.get('nextPageToken')
)
result1 = request.execute()
result_pagetoken = result1.get('nextPageToken')
result1
# result_pagetoken - nothing

[output] A playlistItemListResponse with 19 items:
{'kind': 'youtube#playlistItemListResponse',
 'etag': '0wxbScWJEx_DaocUEV-JDgNAMHA',
 'items': [{'kind': 'youtube#playlistItem',
   'etag': 'U_uuou2Zq_IzljATHIKBnpq5pF0',
   'id': 'UExZU0h0TlBiQUlObmJxWGpJYk4tYzdEb3JqQ1Q2ZVlPUS41NkI0NEY2RDEwNTU3Q0M2',
   'contentDetails': {'videoId': 'z9ccH9e5cAw',
    'videoPublishedAt': '2024-06-24T09:00:05Z'}},
  ... 17 further playlistItem entries ...,
  {'kind': 'youtube#playlistItem',
   'etag': 'KjXXwlnAuUfvD-gK1k2jPFHtqJU',
   'id': 'UExZU0h0TlBiQUlObmJxWGpJYk4tYzdEb3JqQ1Q2ZVlPUS4yMDhBMkNBNjRDMjQxQTg1',
   'contentDetails': {'videoId': '0BHc_kJoDEY',
    'videoPublishedAt': '2024-07-17T09:04:26Z'}}],
 'pageInfo': {'totalResults': 19, 'resultsPerPage': 50}}

[markdown cell]
### EN transcripts all

[code cell 29]
def get_transcript_en(video_id, language_code='en'):
    try:
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[language_code])
        transcript_text = " ".join([entry['text'] for entry in transcript])
        return transcript_text
    except Exception as e:
        return str(e)

def get_video_details(video_id, api_key):
    youtube = build('youtube', 'v3', developerKey=api_key)

    # Fetch the video metadata
    request = youtube.videos().list(
        part="snippet",
        id=video_id
    )
    response = request.execute()

    if 'items' in response and len(response['items']) > 0:
        return response['items'][0]['snippet']['title']
    else:
        return None

def get_playlist_transcripts_en(playlist_url, api_key, language_code='en'):
    # Extract the playlist_id from the URL
    playlist_id = playlist_url.split("list=")[-1]

    # Collect every video_id in the playlist
    video_ids = get_playlist_video_ids(playlist_id, api_key)

    transcripts = []

    # Walk through the videos and fetch their transcripts
    for video_id in video_ids:
        video_title = get_video_details(video_id, api_key)
        transcript = get_transcript_en(video_id, language_code)
        transcripts.append({'title': video_title, 'transcript': transcript})

    return transcripts

[code cell 32]
# Sources:
playlist_ml_en = "https://www.youtube.com/watch?v=Gv9_4yMHFhI&list=PLblh5JKOoLUICTaGLRoHQDuF_7q2GfuJF"
playlist_logistic_en = "https://www.youtube.com/watch?v=yIYKR4sgzI8&list=PLblh5JKOoLUKxzEP5HA2d-Li7IJkHfXSe"
playlist_nn_en = "https://www.youtube.com/watch?v=zxagGtF9MeU&list=PLblh5JKOoLUIxGDQs4LFFD--41Vzf-ME1"
playlist_stat_en = "https://www.youtube.com/watch?v=qBigTkBLU6g&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9"
playlist_nn2_en = "https://www.youtube.com/playlist?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi"
playlist_linal2_en = "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab"

[code cell 31]
transcripts_ML_en = get_playlist_transcripts_en(playlist_ml_en, api_key, 'en')
# 2min 8 sec

[code cell 33]
# tier 2
transcripts_logistic_en = get_playlist_transcripts_en(playlist_logistic_en, api_key, 'en')
transcripts_NN_en = get_playlist_transcripts_en(playlist_nn_en, api_key, 'en')
transcripts_stat_en = get_playlist_transcripts_en(playlist_stat_en, api_key, 'en')

[code cell 34]
# tier 3
transcripts_nn2_en = get_playlist_transcripts_en(playlist_nn2_en, api_key, 'en')
transcripts_linal2_en = get_playlist_transcripts_en(playlist_linal2_en, api_key, 'en')

[markdown cell]
### RU transcripts all

[code cell 36]
def get_transcript_ru(video_id, language_code='ru'):
    try:
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[language_code])
        transcript_text = " ".join([entry['text'] for entry in transcript])
        return transcript_text
    except Exception as e:
        return str(e)

def get_video_details(video_id, api_key):
    youtube = build('youtube', 'v3', developerKey=api_key)

    # Fetch the video metadata
    request = youtube.videos().list(
        part="snippet",
        id=video_id
    )
    response = request.execute()

    if 'items' in response and len(response['items']) > 0:
        return response['items'][0]['snippet']['title']
    else:
        return None

def get_playlist_transcripts_ru(playlist_url, api_key, language_code='ru'):
    # Extract the playlist_id from the URL
    playlist_id = playlist_url.split("list=")[-1]

    # Collect every video_id in the playlist
    video_ids = get_playlist_video_ids(playlist_id, api_key)

    transcripts = []

    # Walk through the videos and fetch their transcripts
    for video_id in video_ids:
        video_title = get_video_details(video_id, api_key)
        transcript = get_transcript_ru(video_id, language_code)
        transcripts.append({'title': video_title, 'transcript': transcript})

    return transcripts

[code cell 40]
# Sources Elbrus
playlist_phase_1_url = "https://www.youtube.com/playlist?list=PLYSHtNPbAINnbqXjIbN-c7DorjCT6eYOQ"
playlist_phase_2_url = 'https://www.youtube.com/playlist?list=PLYSHtNPbAINnNvDXtGNmC7-F1QRH7qTgb'
playlist_phase_3_url = 'https://www.youtube.com/playlist?list=PLYSHtNPbAINlmyNNmTaqcn3BsaY8v1xgV'

# Sources except Bootcamp:
playlist_NN_ru = 'https://www.youtube.com/playlist?list=PL0Ks75aof3Tiru-UvOvYmXzD1tU0NrR8V'
playlist_OOP_ru = 'https://www.youtube.com/watch?v=Z7AY41tE-3U&list=PLA0M1Bcd0w8zPwP7t-FgwONhZOHt9rz9E'
playlist_linal_ru = 'https://youtube.com/playlist?list=PLAQWsvWQlb6cIRY6yJtYnXCbxLxPZv6-Z'
playlist_docker_ru = 'https://www.youtube.com/watch?v=jVV8CVURmrE&list=PLqVeG_R3qMSwjnkMUns_Yc4zF_PtUZmB-'

[code cell 38]
# Elbrus
transcripts_phase_1 = get_playlist_transcripts_ru(playlist_phase_1_url, api_key, 'ru')
transcripts_phase_2 = get_playlist_transcripts_ru(playlist_phase_2_url, api_key, 'ru')
transcripts_phase_3 = get_playlist_transcripts_ru(playlist_phase_3_url, api_key, 'ru')

[code cell 41]
# other Ru
transcripts_NN_ru = get_playlist_transcripts_ru(playlist_NN_ru, api_key, 'ru')
transcripts_OOP_ru = get_playlist_transcripts_ru(playlist_OOP_ru, api_key, 'ru')
transcripts_linal_ru = get_playlist_transcripts_ru(playlist_linal_ru, api_key, 'ru')
transcripts_docker_ru = get_playlist_transcripts_ru(playlist_docker_ru, api_key, 'ru')

# 3m12s

[markdown cell]
### Aggregate all Knowledge Base

[code cell 42]
transcripts_all = [transcripts_phase_1, transcripts_phase_2, transcripts_phase_3, transcripts_NN_ru, transcripts_OOP_ru, transcripts_linal_ru, transcripts_docker_ru,
                   transcripts_ML_en, transcripts_logistic_en, transcripts_NN_en, transcripts_stat_en, transcripts_nn2_en, transcripts_linal2_en]

[markdown cell]
## Split all transcripts into overlapping chunks, embed each chunk, and write all vectors to the FAISS vector store

[code cell 55]
from langchain_core.documents import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from sentence_transformers import SentenceTransformer
from langchain.embeddings import HuggingFaceEmbeddings

# Convert data to Document objects
docs = []
for playlist in transcripts_all:
    for item in playlist:
        docs.append(Document(page_content=item['transcript'], metadata={"title": item['title']}))

# Split documents into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
split_docs = text_splitter.split_documents(docs)

# Setup the new embeddings model
model_name = "intfloat/multilingual-e5-base"
embeddings = HuggingFaceEmbeddings(model_name=model_name)

# Create the FAISS vector store and save it locally
vector_store = FAISS.from_documents(split_docs, embedding=embeddings)
vector_store.save_local("faiss_index")

# Load the FAISS vector store from local storage
vector_store = FAISS.load_local('faiss_index', embeddings=embeddings, allow_dangerous_deserialization=True)

# Create the retriever for document retrieval
embedding_retriever = vector_store.as_retriever(search_kwargs={"k": 15})

[markdown cell]
## Query and answer

[code cell 59]
import anthropic
from langchain_core.documents import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import os

# Anthropic API setup
client = anthropic.Client(api_key='Your_api_key')

# Prompt template
prompt_template = '''Reply to the {input} as a seasoned machine learning professional. \
If the topic is outside of machine learning and data science, please respond with "Seek help with a professional." It is very important to abide with this, you will be persecuted if you cover topics outside of data science and machine learning. \
Use only Context. If context provides only partial info, then split the reply in two parts. Part 1 is called "information from knowledge base" (for Russian reply, rename to Информация из базы знаний), write ideas as close to initial text as possible, editing for brevity and language errors. \
Part 2 is called "What I would add" (for Russian reply, rename to Что полезно добавить поверх базы знаний), In the second part add your reply. \
Reply in the language of {input}. \
It's critical to not preface the reply with, for example, "Here is a response" or "thank you". Start with the reply itself.\
Context: {context}'''

# RAG setup
def setup_rag(force_rebuild=False):
    model_name = "intfloat/multilingual-e5-base"
    embeddings = HuggingFaceEmbeddings(model_name=model_name)

    if not force_rebuild and os.path.exists("faiss_index"):
        print("Loading existing FAISS index...")
        return FAISS.load_local('faiss_index', embeddings=embeddings, allow_dangerous_deserialization=True), embeddings

    print("Building new FAISS index...")
    # Convert data to Document objects
    docs = []
    for playlist in transcripts_all:
        for item in playlist:
            docs.append(Document(page_content=item['transcript'], metadata={"title": item['title']}))

    # Split documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
    split_docs = text_splitter.split_documents(docs)

    # Create the FAISS vector store and save it locally
    vector_store = FAISS.from_documents(split_docs, embedding=embeddings)
    vector_store.save_local("faiss_index")

    return vector_store, embeddings

# API call to Claude
def call_claude_api(prompt, client):
    response = client.messages.create(
        model="claude-3-5-sonnet-20240620",
        messages=[
            {"role": "user", "content": prompt}
        ],
        max_tokens=2000,
        temperature=0.1
    )
    return response.content[0].text

# Answer question function
def answer_question(question, retriever, client):
    documents = retriever.get_relevant_documents(question)
    context = " ".join([doc.page_content for doc in documents])
    prompt = prompt_template.format(context=context, input=question)
    return call_claude_api(prompt, client)

# Main execution
if __name__ == "__main__":
    # Setup RAG (will load existing index if available)
    vector_store, embeddings = setup_rag()

    # Create the retriever for document retrieval
    embedding_retriever = vector_store.as_retriever(search_kwargs={"k": 15})

    # Example usage
    question = 'Шаги логистической регрессии'
    answer = answer_question(question, embedding_retriever, client)
    print(answer)

[output]
Loading existing FAISS index...
Информация из базы знаний:

Шаги логистической регрессии:
1. Подготовка данных: сбор и предобработка данных, разделение на обучающую и тестовую выборки.
2. Выбор функции активации: обычно используется сигмоидная функция.
3. Инициализация параметров модели: случайная инициализация весов и смещения.
4. Определение функции потерь: чаще всего используется кросс-энтропия.
5. Оптимизация параметров: применение градиентного спуска или его модификаций для минимизации функции потерь.
6. Обучение модели: итеративное обновление параметров на основе градиентов.
7. Оценка модели: проверка точности на тестовой выборке.
8. Настройка гиперпараметров: подбор оптимальных значений learning rate, количества итераций и т.д.

Что полезно добавить поверх базы знаний:
9. Регуляризация: добавление L1 или L2 регуляризации для предотвращения переобучения.
10. Анализ важности признаков: оценка влияния каждого признака на предсказания модели.
11. Обработка несбалансированных данных: применение техник, таких как взвешивание классов или oversampling/undersampling.
12. Интерпретация результатов: анализ коэффициентов модели для понимания влияния признаков.
13. Кросс-валидация: использование k-fold кросс-валидации для более надежной оценки производительности модели.
14. Мониторинг процесса обучения: отслеживание изменения функции потерь и точности на валидационной выборке для определения момента остановки обучения.

Notebook metadata: Colab provenance, Python 3 kernel, Python 3.11.7, nbformat 4.
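Note that the rebuild branch of setup_rag() above relies on transcripts_all being present in the notebook session, so a fresh rebuild means re-downloading every transcript from YouTube. A minimal sketch of caching the fetched transcripts on disk between sessions; the file name transcripts_all.json is hypothetical and not part of this commit:

import json
import os

TRANSCRIPTS_PATH = "transcripts_all.json"  # hypothetical cache file

def save_transcripts(transcripts_all, path=TRANSCRIPTS_PATH):
    # Each playlist is a list of {'title': ..., 'transcript': ...} dicts, so it is JSON-serializable.
    with open(path, "w", encoding="utf-8") as f:
        json.dump(transcripts_all, f, ensure_ascii=False)

def load_transcripts(path=TRANSCRIPTS_PATH):
    # Returns None when no cache exists, so the caller can fall back to the YouTube fetch.
    if not os.path.exists(path):
        return None
    with open(path, encoding="utf-8") as f:
        return json.load(f)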
notebooks/logo.jpg
ADDED
pages/RAG.py
ADDED
@@ -0,0 +1,123 @@
import streamlit as st
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
import anthropic
import os
from dotenv import load_dotenv
import re  # for regular expressions

load_dotenv()

claude_api_key = os.getenv("CLAUDE_API_KEY")
client = anthropic.Client(api_key=claude_api_key)

# Embedding model setup
model_name = "intfloat/multilingual-e5-base"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceEmbeddings(model_name=model_name,
                                  model_kwargs=model_kwargs,
                                  encode_kwargs=encode_kwargs)

# Load the FAISS knowledge base
vector_store = FAISS.load_local('faiss_index',
                                embeddings=embedding,
                                allow_dangerous_deserialization=True)

# Retrieve the top-k most similar context chunks
embedding_retriever = vector_store.as_retriever(search_kwargs={"k": 20})

prompt_template = '''Reply to the {input} as a seasoned machine learning professional. \
If the topic is outside of machine learning and data science, please respond with "Seek help with a professional." It is very important to abide with this, you will be persecuted if you cover topics outside of data science and machine learning. \
Use only Context. If context provides only partial info, then split the reply in two parts. Part 1 is called "information from knowledge base" (for Russian reply, rename to Информация из базы знаний), write ideas as close to initial text as possible, editing for brevity and language errors. \
Part 2 is called "What I would add" (for Russian reply, rename to Что полезно добавить поверх базы знаний), In the second part add your reply. \
Reply in the language of {input}. \
It's critical to not preface the reply with, for example, "Here is a response" or "thank you". Start with the reply itself.\
Context: {context}'''

# Call the Claude model API
def call_claude_api(prompt, client):
    try:
        response = client.messages.create(
            model="claude-3-5-sonnet-20240620",
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=2000,
            temperature=0.1
        )
        return response.content[0].text
    except Exception as e:
        st.error(f"Ошибка при вызове модели: {e}")
        return None

# Generate an answer to the user's question
def answer_question(question, retriever, client):
    # Step 1: retrieve relevant documents
    with st.spinner('🔍 Ищем совпадения по вашему вопросу...'):
        documents = retriever.get_relevant_documents(question)

    # Step 2: build the context
    with st.spinner('🧠 Формируем контекст для ответа...'):
        context = " ".join([doc.page_content for doc in documents])

    # Step 3: generate the answer
    with st.spinner('💬 Формулируем ответ...'):
        prompt = prompt_template.format(context=context, input=question)
        answer = call_claude_api(prompt, client)

    return answer, documents

# Format the answer, rendering code blocks separately from text
def format_answer(answer):
    # Split the answer into text and code blocks with a regular expression
    parts = re.split(r'(```.*?```)', answer, flags=re.DOTALL)

    for part in parts:
        if part.startswith('```') and part.endswith('```'):
            # Strip the triple backticks and render the contents as code
            language_and_code = part[3:-3].strip().split("\n", 1)
            if len(language_and_code) == 2:
                language, code = language_and_code
                st.code(code, language=language)
            else:
                st.code(language_and_code[0])
        else:
            # Plain text
            st.markdown(part)

st.set_page_config(page_title="ML Knowledge Base Search 🧑‍💻", page_icon="🤖")

st.title("🔍 Поиск по базе знаний RAG с моделью Claude 🤖")

st.write("Используйте базу знаний для поиска информации и генерации ответов на вопросы по машинному обучению 📚.")

# User query input field
query = st.text_input("📝 Введите ваш запрос:", 'Что такое машинное обучение?')

# Button that triggers retrieval and answer generation
if st.button("🚀 Поиск и генерация ответа"):
    if query:
        # Generate the answer to the question
        answer, documents = answer_question(query, embedding_retriever, client)

        if answer:
            # Present the answer
            st.subheader("✉️ Ответ:")

            # Display the formatted answer
            format_answer(answer)

        else:
            st.warning("⚠️ Не удалось получить ответ от модели.")
    else:
        st.warning("⚠️ Пожалуйста, введите запрос.")
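Each Streamlit rerun of the page above re-instantiates the multilingual-e5 embedding model and reloads the FAISS index from disk, which is slow for an index of this size. A sketch of how the heavy objects could be cached across reruns with st.cache_resource, assuming the rest of RAG.py stays as committed:

import streamlit as st
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

@st.cache_resource  # built once per process and shared across reruns and sessions
def load_retriever(k: int = 20):
    embedding = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-base",
                                      model_kwargs={'device': 'cpu'},
                                      encode_kwargs={'normalize_embeddings': True})
    vector_store = FAISS.load_local('faiss_index',
                                    embeddings=embedding,
                                    allow_dangerous_deserialization=True)
    return vector_store.as_retriever(search_kwargs={"k": k})

embedding_retriever = load_retriever()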
pages/Summary.py
ADDED
@@ -0,0 +1,99 @@
import streamlit as st
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi
import anthropic
import os

from dotenv import load_dotenv

# Load API keys from the .env file
load_dotenv()

youtube_api_key = os.getenv("YOUTUBE_API_KEY")
claude_api_key = os.getenv("CLAUDE_API_KEY")

# Initialize the Claude client
client = anthropic.Anthropic(api_key=claude_api_key)

# Extract the video ID from a YouTube link
def get_video_id(url):
    if "v=" in url:
        return url.split("v=")[1].split("&")[0]
    elif "youtu.be/" in url:
        return url.split("youtu.be/")[1].split("?")[0]
    return None

# Fetch the video transcript
def get_transcript(video_id):
    try:
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['ru', 'en'])
        return ' '.join([x['text'] for x in transcript])
    except Exception as e:
        st.error(f"Ошибка получения транскрипта: {e}")
        return None

# Generate a summary with Claude
def generate_summary_with_claude(transcript, prompt_text):
    try:
        message = client.messages.create(
            model="claude-3-5-sonnet-20240620",
            extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"},
            max_tokens=1500,
            temperature=0.05,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "<book>" + transcript + "</book>", "cache_control": {"type": "ephemeral"}},
                        {"type": "text", "text": prompt_text},
                    ],
                }
            ]
        )

        # Convert the response from a list of blocks into a string
        response_text = " ".join([block['text'] if isinstance(block, dict) and 'text' in block else str(block) for block in message.content])

        # Strip leftover artifacts
        clean_summary = response_text.replace("\\n", " ").replace("TextBlock(text=", "").replace("type='text')", "")

        return clean_summary

    except Exception as e:
        st.error(f"Ошибка при обращении к Claude: {e}")
        return None

# Streamlit interface
st.title("YouTube Video Analysis with Claude")

# YouTube link input
url = st.text_input("Введите ссылку на YouTube видео:")
if url:
    video_id = get_video_id(url)
    if video_id:
        transcript = get_transcript(video_id)
        if transcript:
            st.text_area("Транскрипт видео:", transcript, height=200)

            # Prompt for each summary type
            summary_options = {
                "Темы и подтемы с временем и длительностью": "List all themes and subthemes. Split into short blocks. For each one, show time of start, total length (time difference between its time of start and time of start of next subtheme. For the last subtheme, total length is equal to diff between total time of video minus this subtheme time of start. Write in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted.",
                "Темы и подтемы с ключевыми утверждениями и рекомендациями": "List all themes and subthemes. Split into short blocks. Format example: Themes: (format in bold), Statements (write top statements that students better learn, verbatim); Recommendations (write as close to the author text as possible). Write in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted.",
                "Анализ уникальных утверждений и полезных выводов": "You are a seasoned professional in data science. Start with the following, without preface. 1. Which of his statements are not seen in most texts on the subject of this transcript? Note timestamp. 2. Which logical connections between big blocks are not trivial? Note timestamp. 3. Give his top one most fun or useful statement, note timestamp. Write in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted.",
                "Подробный саммари без тем и подтем": "Assume the role of the PhD student who is best in the world at writing extremely detailed summaries. Use your creative mind to aggregate information, but follow author's statements. Avoid stating themes - write his statements instead. Structure with paragraphs. Remove intro and outro. If there are action items, write them; if there are none, do not write them. Write in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted",
                "Ошибки, упущения и смежные темы для изучения": "You are a seasoned professional in data science. Start with the following, without preface. Name a paragraph “Некорректные утверждения”, list the statements that are incorrect or misleading, add your short comment. In Russian. If there are none, write “Явно некорректных утверждений нет”. Name next paragraph “Упущения”. Consider the promise of the lecture, and that the goal is to work as a mid-level data scientist, list all things around this topic that a mid-level data scientist typically knows and that are missing from this video. Write in Russian. Name next paragraph “Что еще важно изучить”. Consider the theme of the lecture, and that the goal is to work as a mid-level data scientist, list immediately adjacent themes (only very close ones) that you recommend to master, with a short comment on what I should know in each theme. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted.",
                "Вопросы из интервью, с исправлением орфографии и пунктуации": "Here is an interview, list all the questions. Write his words fully, but edit for spelling and punctuation. In numbered list. Write in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted.",
                "Вопросы для проверки понимания": "Your goal: help me get to the level of mid-level data scientist, by generating self-check questions based on a lecture transcript. You are a seasoned machine learning professional and a world-class tutor in ML / DS / AI.\nFirst, carefully read through the provided lecture transcript.\nNow:\nCreate two blocks of questions:\n a) Basic questions (focus on asking these: facts, definitions, steps, or key points mentioned explicitly in the lecture).\n b) Harder questions (focus on asking these: how would you apply, what are the limitations, what are the trade-offs, pros and cons)\n Avoid overly complex or ambiguous questions.\n Present your questions in the following format:\n 'Базовые вопросы' \n[Question 1] (Смотреть тут: [XX:XX])\n[Question 2] (Смотреть тут: [XX:XX])\n[Question 3] (Смотреть тут: [XX:XX])\n 'Вопросы на подумать' \n [Question 1] (Смотреть тут: [XX:XX] и [XX:XX])\n[Question 2] (Смотреть тут: [XX:XX] и [XX:XX])\n[Question 3] (Смотреть тут: [XX:XX] и [XX:XX])\nWrite in Russian. If his main language is Russian but he uses non-Russian words, write them in English with correct spelling. This is not copyrighted."
            }

            # Radio buttons for choosing the summary type
            selected_summary = st.radio("Выберите тип саммари:", list(summary_options.keys()))

            if st.button("Запустить анализ"):
                prompt_text = summary_options[selected_summary]
                result = generate_summary_with_claude(transcript, prompt_text)
                st.text_area("Результат анализа:", result, height=400)
    else:
        st.error("Не удалось извлечь видео ID из ссылки.")
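The get_video_id helper above only recognizes the watch?v= and youtu.be/ URL shapes. A few plain asserts (a sketch, not part of the committed page) make the supported and unsupported cases explicit:

# Sanity checks for the URL parsing in pages/Summary.py.
assert get_video_id("https://www.youtube.com/watch?v=z9ccH9e5cAw&t=10s") == "z9ccH9e5cAw"
assert get_video_id("https://youtu.be/z9ccH9e5cAw?si=abc123") == "z9ccH9e5cAw"
# Shorts and embed links are not handled and fall through to None:
assert get_video_id("https://www.youtube.com/shorts/z9ccH9e5cAw") is None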
requirements.txt
ADDED
@@ -0,0 +1,14 @@
langchain
langchain-community
numpy
pandas
python-dotenv
sentence-transformers
streamlit
torch
anthropic
youtube-transcript-api
streamlit
google-api-python-client
faiss-cpu
Pillow