| Column | Type |
|---|---|
| status | stringclasses (1 value) |
| repo_name | stringclasses (31 values) |
| repo_url | stringclasses (31 values) |
| issue_id | int64 (1 to 104k) |
| title | stringlengths (4 to 233) |
| body | stringlengths (0 to 186k), nullable (⌀) |
| issue_url | stringlengths (38 to 56) |
| pull_url | stringlengths (37 to 54) |
| before_fix_sha | stringlengths (40 to 40) |
| after_fix_sha | stringlengths (40 to 40) |
| report_datetime | unknown |
| language | stringclasses (5 values) |
| commit_datetime | unknown |
| updated_file | stringlengths (7 to 188) |
| chunk_content | stringlengths (1 to 1.03M) |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | After ingesting some markdown files using a slightly modified version of the question-answering over docs example, I ran the qa.py script as it was in the example
```
# qa.py
import faiss
from langchain import OpenAI, HuggingFaceHub, LLMChain
from langchain.chains import VectorDBQAWithSourcesChain
import pickle
import argparse
parser = argparse.ArgumentParser(description='Ask a question to the notion DB.')
parser.add_argument('question', type=str, help='The question to ask the notion DB')
args = parser.parse_args()
# Load the LangChain.
index = faiss.read_index("docs.index")
with open("faiss_store.pkl", "rb") as f:
store = pickle.load(f)
store.index = index
chain = VectorDBQAWithSourcesChain.from_llm(llm=OpenAI(temperature=0), vectorstore=store)
result = chain({"question": args.question})
print(f"Answer: {result['answer']}")
```
Only to get this cryptic error
```
Traceback (most recent call last):
File "C:\Users\ahmad\OneDrive\Desktop\Coding\LANGCHAINSSSSSS\notion-qa\qa.py", line 22, in <module>
result = chain({"question": args.question})
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\base.py", line 146, in __call__
raise e
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\base.py", line 142, in __call__
outputs = self._call(inputs)
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\qa_with_sources\base.py", line 97, in _call
answer, _ = self.combine_document_chain.combine_docs(docs, **inputs)
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\combine_documents\map_reduce.py", line 150, in combine_docs
num_tokens = length_func(result_docs, **kwargs)
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\combine_documents\stuff.py", line 77, in prompt_length
inputs = self._get_inputs(docs, **kwargs)
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\combine_documents\stuff.py", line 64, in _get_inputs
document_info = {
File "C:\Users\ahmad\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\langchain\chains\combine_documents\stuff.py", line 65, in <dictcomp>
k: base_info[k] for k in self.document_prompt.input_variables
KeyError: 'source'
```
Here is the code I used for ingesting
|
```
"""This is the logic for ingesting Notion data into LangChain."""
from pathlib import Path
from langchain.text_splitter import CharacterTextSplitter
import faiss
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
import time
from tqdm import tqdm
# Here we load in the data in the format that Notion exports it in.
folder = list(Path("Notion_DB/").glob("**/*.md"))
files = []
sources = []
for myFile in folder:
with open(myFile, 'r', encoding='utf-8') as f:
print(myFile.name)
files.append(f.read())
sources.append(myFile)
# Here we split the documents, as needed, into smaller chunks.
# We do this due to the context limits of the LLMs.
text_splitter = CharacterTextSplitter(chunk_size=800, separator="\n")
docs = []
metadatas = []
for i, f in enumerate(files):
splits = text_splitter.split_text(f)
docs.extend(splits)
metadatas.extend([{"source": sources[i]}] * len(splits))
# Add each element in docs into FAISS store, keeping a delay between inserting elements so we don't exceed rate limit
store = None
for (index, chunk) in tqdm(enumerate(docs)):
if index == 0:
store = FAISS.from_texts([chunk], OpenAIEmbeddings())
else:
time.sleep(1) # wait for a second to not exceed any rate limits
store.add_texts([chunk])
# print('finished with index '+index.__str__())
print('Done yayy!')
# # Here we create a vector store from the documents and save it to disk.
faiss.write_index(store.index, "docs.index")
store.index = None
with open("faiss_store.pkl", "wb") as f:
pickle.dump(store, f)
```
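Reading the traceback together with this ingest script, the likely root cause is that the `metadatas` list is built but never handed to FAISS, so the stored documents carry no `source` key for the sources chain's document prompt to format. The sketch below is an editorial illustration of how the loop could attach that metadata; it assumes the `metadatas=` parameter of `FAISS.from_texts` / `add_texts` and is not code from the issue or from the linked fix PR.
```
# Hedged sketch: same loop as above, but passing the per-chunk metadata so each
# stored Document carries the "source" key that the sources chain formats.
store = None
for index, chunk in tqdm(enumerate(docs)):
    if index == 0:
        store = FAISS.from_texts([chunk], OpenAIEmbeddings(), metadatas=[metadatas[index]])
    else:
        time.sleep(1)  # keep the original rate-limit pause
        store.add_texts([chunk], metadatas=[metadatas[index]])
```
With the metadata attached, the `{source}` variable in the chain's document prompt resolves instead of raising `KeyError` at query time.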
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/refine.py | """Combine by mapping first chain over all, then stuffing into final chain."""
inputs = self._construct_initial_inputs(docs, **kwargs)
res = self.initial_llm_chain.predict(**inputs)
refine_steps = [res]
for doc in docs[1:]:
base_inputs = self._construct_refine_inputs(doc, res)
inputs = {**base_inputs, **kwargs}
res = self.refine_llm_chain.predict(**inputs)
refine_steps.append(res)
return self._construct_result(refine_steps, res)
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Combine by mapping first chain over all, then stuffing into final chain."""
inputs = self._construct_initial_inputs(docs, **kwargs)
res = await self.initial_llm_chain.apredict(**inputs)
refine_steps = [res]
for doc in docs[1:]:
base_inputs = self._construct_refine_inputs(doc, res)
inputs = {**base_inputs, **kwargs}
res = await self.refine_llm_chain.apredict(**inputs)
refine_steps.append(res)
return self._construct_result(refine_steps, res)
def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/refine.py | if self.return_intermediate_steps:
extra_return_dict = {"intermediate_steps": refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
base_info = {"page_content": doc.page_content}
base_info.update(doc.metadata)
document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
base_inputs = {
self.document_variable_name: self.document_prompt.format(**document_info),
self.initial_response_name: res,
}
return base_inputs
def _construct_initial_inputs(
self, docs: List[Document], **kwargs: Any
) -> Dict[str, Any]:
base_info = {"page_content": docs[0].page_content}
base_info.update(docs[0].metadata)
document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
base_inputs: dict = {
self.document_variable_name: self.document_prompt.format(**document_info)
}
inputs = {**base_inputs, **kwargs}
return inputs
@property
def _chain_type(self) -> str:
return "refine_documents_chain" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/stuff.py | """Chain that combines documents by stuffing into context."""
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Extra, Field, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
def _get_default_document_prompt() -> PromptTemplate:
return PromptTemplate(input_variables=["page_content"], template="{page_content}")
class StuffDocumentsChain(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context."""
llm_chain: LLMChain
"""LLM wrapper to use after formatting documents."""
document_prompt: BasePromptTemplate = Field(
default_factory=_get_default_document_prompt
)
"""Prompt to use to format each document."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
class Config: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/stuff.py | """Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/stuff.py | doc_dicts = []
for doc in docs:
base_info = {"page_content": doc.page_content}
base_info.update(doc.metadata)
document_info = {
k: base_info[k] for k in self.document_prompt.input_variables
}
doc_dicts.append(document_info)
doc_strings = [self.document_prompt.format(**doc) for doc in doc_dicts]
inputs = {
k: v
for k, v in kwargs.items()
if k in self.llm_chain.prompt.input_variables
}
inputs[self.document_variable_name] = "\n\n".join(doc_strings)
return inputs
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: |
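The `_get_inputs` body in this chunk is the exact spot the issue's traceback points at: every input variable of `document_prompt` other than `page_content` must be present in each `Document.metadata`, or the dict comprehension raises `KeyError`. A small illustration of that requirement (the document contents and file name here are invented for the example):
```
from langchain.docstore.document import Document
from langchain.prompts.prompt import PromptTemplate

doc_prompt = PromptTemplate(
    input_variables=["page_content", "source"],
    template="Content: {page_content}\nSource: {source}",
)
good_doc = Document(page_content="some chunk", metadata={"source": "notes.md"})
bad_doc = Document(page_content="some chunk")  # no "source" in metadata

base_info = {"page_content": bad_doc.page_content, **bad_doc.metadata}
document_info = {k: base_info[k] for k in doc_prompt.input_variables}  # KeyError: 'source'
```
This is why ingesting chunks without their metadata only surfaces as an error later, at query time.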
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | langchain/chains/combine_documents/stuff.py | """Get the prompt length by formatting the prompt."""
inputs = self._get_inputs(docs, **kwargs)
prompt = self.llm_chain.prompt.format(**inputs)
return self.llm_chain.llm.get_num_tokens(prompt)
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
return self.llm_chain.predict(**inputs), {}
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
return await self.llm_chain.apredict(**inputs), {}
@property
def _chain_type(self) -> str:
return "stuff_documents_chain" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | tests/unit_tests/chains/test_combine_documents.py | """Test functionality related to combining documents."""
from typing import Any, List
import pytest
from langchain.chains.combine_documents.map_reduce import (
_collapse_docs,
_split_list_of_docs,
)
from langchain.docstore.document import Document
def _fake_docs_len_func(docs: List[Document]) -> int:
return len(_fake_combine_docs_func(docs))
def _fake_combine_docs_func(docs: List[Document], **kwargs: Any) -> str:
return "".join([d.page_content for d in docs])
def test__split_list_long_single_doc() -> None:
"""Test splitting of a long single doc."""
docs = [Document(page_content="foo" * 100)]
with pytest.raises(ValueError):
_split_list_of_docs(docs, _fake_docs_len_func, 100)
def test__split_list_long_pair_doc() -> None:
"""Test splitting of a list with two medium docs."""
docs = [Document(page_content="foo" * 30)] * 2
with pytest.raises(ValueError):
_split_list_of_docs(docs, _fake_docs_len_func, 100)
def test__split_list_single_doc() -> None:
"""Test splitting works with just a single doc."""
docs = [Document(page_content="foo")]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100)
assert doc_list == [docs]
def test__split_list_double_doc() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | tests/unit_tests/chains/test_combine_documents.py | """Test splitting works with just two docs."""
docs = [Document(page_content="foo"), Document(page_content="bar")]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100)
assert doc_list == [docs]
def test__split_list_works_correctly() -> None:
"""Test splitting works correctly."""
docs = [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
Document(page_content="foo" * 2),
Document(page_content="bar"),
Document(page_content="baz"),
]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 10)
expected_result = [
[
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
],
[Document(page_content="foo" * 2), Document(page_content="bar")],
[Document(page_content="baz")],
]
assert doc_list == expected_result
def test__collapse_docs_no_metadata() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | tests/unit_tests/chains/test_combine_documents.py | """Test collapse documents functionality when no metadata."""
docs = [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
output = _collapse_docs(docs, _fake_combine_docs_func)
expected_output = Document(page_content="foobarbaz")
assert output == expected_output
def test__collapse_docs_one_doc() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,944 | Question Answering over Docs giving cryptic error upon query | (issue body identical to the first row above)
| https://github.com/langchain-ai/langchain/issues/2944 | https://github.com/langchain-ai/langchain/pull/3026 | 3453b7457ca60227430d85e6f6f58a2aafae559d | 19c85aa9907765c0a2dbe7c46e9d5dd2d6df0f30 | "2023-04-15T15:38:36Z" | python | "2023-04-18T03:28:01Z" | tests/unit_tests/chains/test_combine_documents.py | """Test collapse documents functionality when only one document present."""
docs = [Document(page_content="foo")]
output = _collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
docs = [Document(page_content="foo", metadata={"source": "a"})]
output = _collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
def test__collapse_docs_metadata() -> None:
"""Test collapse documents functionality when metadata exists."""
metadata1 = {"source": "a", "foo": 2, "bar": "1", "extra1": "foo"}
metadata2 = {"source": "b", "foo": "3", "bar": 2, "extra2": "bar"}
docs = [
Document(page_content="foo", metadata=metadata1),
Document(page_content="bar", metadata=metadata2),
]
output = _collapse_docs(docs, _fake_combine_docs_func)
expected_metadata = {
"source": "a, b",
"foo": "2, 3",
"bar": "1, 2",
"extra1": "foo",
"extra2": "bar",
}
expected_output = Document(page_content="foobar", metadata=expected_metadata)
assert output == expected_output |
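The `KeyError: 'source'` traced above comes from the document prompt used by the sources chain, which looks up a `source` key in every document's metadata; the ingestion script quoted in the issue builds a `metadatas` list but never passes it to the FAISS store. A minimal sketch of one plausible fix — illustrative only, not the change referenced by the linked PR — reusing the `docs` and `metadatas` names from the script and converting each `Path` source to a plain string:

```
# Sketch (assumption: the rest of the ingestion script is unchanged).
# Passing metadatas= ensures every stored chunk carries a string "source",
# which is what the combine-documents step later looks up.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

string_metadatas = [{"source": str(m["source"])} for m in metadatas]
store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=string_metadatas)
```

The same idea works with the rate-limited loop in the script by calling `store.add_texts([chunk], metadatas=[string_metadatas[index]])` instead of `store.add_texts([chunk])`.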
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,874 | Redundant piece of code | In Agents -> loading.py on line 40 there is a redundant piece of code.
```
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
``` | https://github.com/langchain-ai/langchain/issues/2874 | https://github.com/langchain-ai/langchain/pull/2934 | b40f90ea042b20440cb7c1a9e70a6e4cd4a0089c | ae7ed31386c10cee1683419a4ab45562830bf8eb | "2023-04-14T05:28:42Z" | python | "2023-04-18T04:05:48Z" | langchain/agents/loading.py | """Functionality for loading agents."""
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Type, Union
import yaml
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.agent_types import AgentType
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain.agents.tools import Tool
from langchain.chains.loading import load_chain, load_chain_from_config
from langchain.llms.base import BaseLLM
from langchain.utilities.loading import try_load_from_hub
AGENT_TO_CLASS: Dict[AgentType, Type[BaseSingleActionAgent]] = {
AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,
AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent,
AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent,
AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent,
}
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,874 | Redundant piece of code | In Agents -> loading.py on line 40 there is a redundant piece of code.
```
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
``` | https://github.com/langchain-ai/langchain/issues/2874 | https://github.com/langchain-ai/langchain/pull/2934 | b40f90ea042b20440cb7c1a9e70a6e4cd4a0089c | ae7ed31386c10cee1683419a4ab45562830bf8eb | "2023-04-14T05:28:42Z" | python | "2023-04-18T04:05:48Z" | langchain/agents/loading.py | config: dict, llm: BaseLLM, tools: List[Tool], **kwargs: Any
) -> BaseSingleActionAgent:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
def load_agent_from_config(
config: dict,
llm: Optional[BaseLLM] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any, |
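The duplication reported in issue 2874 is visible in the chunk above: the `config_type not in AGENT_TO_CLASS` guard appears twice in a row inside `_load_agent_from_tools`. A sketch of the deduplicated helper, assuming no behavior change beyond dropping the repeated check (it relies on the module-level imports and `AGENT_TO_CLASS` mapping shown in the previous chunk):

```
def _load_agent_from_tools(
    config: dict, llm: BaseLLM, tools: List[Tool], **kwargs: Any
) -> BaseSingleActionAgent:
    config_type = config.pop("_type")
    # One membership check is enough; the duplicated raise adds nothing.
    if config_type not in AGENT_TO_CLASS:
        raise ValueError(f"Loading {config_type} agent not supported")
    agent_cls = AGENT_TO_CLASS[config_type]
    combined_config = {**config, **kwargs}
    return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
```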
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,874 | Redundant piece of code | In Agents -> loading.py on line 40 there is a redundant piece of code.
```
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
``` | https://github.com/langchain-ai/langchain/issues/2874 | https://github.com/langchain-ai/langchain/pull/2934 | b40f90ea042b20440cb7c1a9e70a6e4cd4a0089c | ae7ed31386c10cee1683419a4ab45562830bf8eb | "2023-04-14T05:28:42Z" | python | "2023-04-18T04:05:48Z" | langchain/agents/loading.py | ) -> BaseSingleActionAgent:
"""Load agent from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
def load_agent(path: Union[str, Path], **kwargs: Any) -> BaseSingleActionAgent: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,874 | Redundant piece of code | In Agents -> loading.py on line 40 there is a redundant piece of code.
```
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
``` | https://github.com/langchain-ai/langchain/issues/2874 | https://github.com/langchain-ai/langchain/pull/2934 | b40f90ea042b20440cb7c1a9e70a6e4cd4a0089c | ae7ed31386c10cee1683419a4ab45562830bf8eb | "2023-04-14T05:28:42Z" | python | "2023-04-18T04:05:48Z" | langchain/agents/loading.py | """Unified method for loading a agent from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_agent_from_file, "agents", {"json", "yaml"}
):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> BaseSingleActionAgent:
"""Load agent from file."""
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
return load_agent_from_config(config, **kwargs) |
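For orientation, a small usage sketch of the loader shown above; the file name is a placeholder, and the JSON is assumed to contain a serialized agent with an `llm_chain` (or `llm_chain_path`) entry, as `load_agent_from_config` requires:

```
# Hypothetical example: reload an agent previously saved to a local JSON file.
from langchain.agents.loading import load_agent

agent = load_agent("my_saved_agent.json")  # placeholder path
```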
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,057 | Error when parsing code from LLM response ValueError: Could not parse LLM output: | Sometimes the LLM response (generated code) tends to miss the ending ticks "```", which causes the text parsing to fail with `not enough values to unpack`.
Suggest simplifying the `_, action, _` unpacking to just `action`, taken from the split result by index.
Error message below
```
> Entering new AgentExecutor chain...
Traceback (most recent call last):
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\agents\chat\output_parser.py", line 17, in parse
_, action, _ = text.split("```")
ValueError: not enough values to unpack (expected 3, got 2)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "E:\open_source_contrib\test.py", line 67, in <module>
agent_msg = agent.run(prompt_template)
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\chains\base.py", line 213, in run
return self(args[0])[self.output_keys[0]]
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\chains\base.py", line 116, in __call__
raise e
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\chains\base.py", line 113, in __call__
outputs = self._call(inputs)
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\agents\agent.py", line 792, in _call
next_step_output = self._take_next_step(
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\agents\agent.py", line 672, in _take_next_step
output = self.agent.plan(intermediate_steps, **inputs)
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\agents\agent.py", line 385, in plan
return self.output_parser.parse(full_output)
File "E:\open_source_contrib\langchain-venv\lib\site-packages\langchain\agents\chat\output_parser.py", line 23, in parse
raise ValueError(f"Could not parse LLM output: {text}")
ValueError: Could not parse LLM output: Question: How do I put the given data into a pandas dataframe and save it into a csv file at the specified path?
Thought: I need to use the Python REPL tool to import pandas, create a dataframe with the given data, and then use the to_csv method to save it to the specified file path.
Action:
```
{
"action": "Python REPL",
"action_input": "import pandas as pd\n\n# create dataframe\ndata = {\n 'Quarter': ['Q4-2021', 'Q1-2022', 'Q2-2022', 'Q3-2022', 'Q4-2022'],\n 'EPS attributable to common stockholders, diluted (GAAP)': [1.07, 0.95, 0.76, 0.95, 1.07],\n 'EPS attributable to common stockholders, diluted (non-GAAP)': [1.19, 1.05, 0.85, 1.05, 1.19]\n}\ndf = pd.DataFrame(data)\n\n# save to csv\ndf.to_csv('E:\\\\open_source_contrib\\\\output\\\\agent_output.xlsx', index=False)"
}
(langchain-venv) PS E:\open_source_contrib>
``` | https://github.com/langchain-ai/langchain/issues/3057 | https://github.com/langchain-ai/langchain/pull/3058 | db968284f8f3964630f119c95cca923f112ad47b | 2984ad39645c80411cee5e7f77a3c116b88d008e | "2023-04-18T04:13:20Z" | python | "2023-04-18T04:42:13Z" | langchain/agents/chat/output_parser.py | import json
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if FINAL_ANSWER_ACTION in text:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
try:
_, action, _ = text.split("```")
response = json.loads(action.strip())
return AgentAction(response["action"], response["action_input"], text)
except Exception:
raise ValueError(f"Could not parse LLM output: {text}") |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,157 | Missing Observation and Thought prefix in output | The console output when running a tool is missing the "Observation" and "Thought" prefixes.
I noticed this when using the SQL Toolkit, but other tools are likely affected.
Here is the current INCORRECT output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artistsThere is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Here is the expected output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""
Observation: invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artists
Thought:There is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Note: this appears to only affect the console output. The `agent_scratchpad` is updated correctly with the "Observation" and "Thought" prefixes. | https://github.com/langchain-ai/langchain/issues/3157 | https://github.com/langchain-ai/langchain/pull/3158 | 126d7f11dd17a8ea71a4427951f10cefc862ba3a | 0b542661b46d42ee501c6681a4519f2c4e76de23 | "2023-04-19T15:15:26Z" | python | "2023-04-19T16:00:10Z" | langchain/tools/base.py | """Base implementation for tools or skills."""
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union
from pydantic import BaseModel, Extra, Field, validate_arguments, validator
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
def _to_args_and_kwargs(run_input: Union[str, Dict]) -> Tuple[Sequence, dict]:
if isinstance(run_input, str):
return (run_input,), {}
else:
return [], run_input
class BaseTool(ABC, BaseModel):
"""Interface LangChain tools must implement."""
name: str
description: str
args_schema: Optional[Type[BaseModel]] = None
"""Pydantic model class to validate and parse the tool's input arguments."""
return_direct: bool = False
verbose: bool = False
callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def args(self) -> dict: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,157 | Missing Observation and Thought prefix in output | The console output when running a tool is missing the "Observation" and "Thought" prefixes.
I noticed this when using the SQL Toolkit, but other tools are likely affected.
Here is the current INCORRECT output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artistsThere is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Here is the expected output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""
Observation: invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artists
Thought:There is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Note: this appears to only affect the console output. The `agent_scratchpad` is updated correctly with the "Observation" and "Thought" prefixes. | https://github.com/langchain-ai/langchain/issues/3157 | https://github.com/langchain-ai/langchain/pull/3158 | 126d7f11dd17a8ea71a4427951f10cefc862ba3a | 0b542661b46d42ee501c6681a4519f2c4e76de23 | "2023-04-19T15:15:26Z" | python | "2023-04-19T16:00:10Z" | langchain/tools/base.py | if self.args_schema is not None:
return self.args_schema.schema()["properties"]
else:
inferred_model = validate_arguments(self._run).model
schema = inferred_model.schema()["properties"]
valid_keys = signature(self._run).parameters
return {k: schema[k] for k in valid_keys}
def _parse_input(
self,
tool_input: Union[str, Dict],
) -> None:
"""Convert tool input to pydantic model."""
input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
key_ = next(iter(input_args.__fields__.keys()))
input_args.validate({key_: tool_input})
else:
if input_args is not None:
input_args.validate(tool_input)
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@abstractmethod
def _run(self, *args: Any, **kwargs: Any) -> str: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,157 | Missing Observation and Thought prefix in output | The console output when running a tool is missing the "Observation" and "Thought" prefixes.
I noticed this when using the SQL Toolkit, but other tools are likely affected.
Here is the current INCORRECT output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artistsThere is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Here is the expected output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""
Observation: invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artists
Thought:There is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Note: this appears to only affect the console output. The `agent_scratchpad` is updated correctly with the "Observation" and "Thought" prefixes. | https://github.com/langchain-ai/langchain/issues/3157 | https://github.com/langchain-ai/langchain/pull/3158 | 126d7f11dd17a8ea71a4427951f10cefc862ba3a | 0b542661b46d42ee501c6681a4519f2c4e76de23 | "2023-04-19T15:15:26Z" | python | "2023-04-19T16:00:10Z" | langchain/tools/base.py | """Use the tool."""
@abstractmethod
async def _arun(self, *args: Any, **kwargs: Any) -> str:
"""Use the tool asynchronously."""
def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
**kwargs: Any,
) -> str:
"""Run the tool."""
self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
self.callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
verbose=verbose_,
color=start_color,
**kwargs,
)
try:
args, kwargs = _to_args_and_kwargs(tool_input)
observation = self._run(*args, **kwargs) |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,157 | Missing Observation and Thought prefix in output | The console output when running a tool is missing the "Observation" and "Thought" prefixes.
I noticed this when using the SQL Toolkit, but other tools are likely affected.
Here is the current INCORRECT output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artistsThere is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Here is the expected output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""
Observation: invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artists
Thought:There is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Note: this appears to only affect the console output. The `agent_scratchpad` is updated correctly with the "Observation" and "Thought" prefixes. | https://github.com/langchain-ai/langchain/issues/3157 | https://github.com/langchain-ai/langchain/pull/3158 | 126d7f11dd17a8ea71a4427951f10cefc862ba3a | 0b542661b46d42ee501c6681a4519f2c4e76de23 | "2023-04-19T15:15:26Z" | python | "2023-04-19T16:00:10Z" | langchain/tools/base.py | except (Exception, KeyboardInterrupt) as e:
self.callback_manager.on_tool_error(e, verbose=verbose_)
raise e
self.callback_manager.on_tool_end(
observation, verbose=verbose_, color=color, name=self.name, **kwargs
)
return observation
async def arun(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
**kwargs: Any,
) -> str:
"""Run the tool asynchronously."""
self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
if self.callback_manager.is_async:
await self.callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
verbose=verbose_,
color=start_color,
**kwargs,
)
else: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,157 | Missing Observation and Thought prefix in output | The console output when running a tool is missing the "Observation" and "Thought" prefixes.
I noticed this when using the SQL Toolkit, but other tools are likely affected.
Here is the current INCORRECT output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artistsThere is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Here is the expected output format:
```
> Entering new AgentExecutor chain...
Action: list_tables_sql_db
Action Input: ""
Observation: invoice_items, invoices, tracks, sqlite_sequence, employees, media_types, sqlite_stat1, customers, playlists, playlist_track, albums, genres, artists
Thought:There is a table called "employees" that I can query.
Action: schema_sql_db
Action Input: "employees"
```
Note: this appears to only affect the console output. The `agent_scratchpad` is updated correctly with the "Observation" and "Thought" prefixes. | https://github.com/langchain-ai/langchain/issues/3157 | https://github.com/langchain-ai/langchain/pull/3158 | 126d7f11dd17a8ea71a4427951f10cefc862ba3a | 0b542661b46d42ee501c6681a4519f2c4e76de23 | "2023-04-19T15:15:26Z" | python | "2023-04-19T16:00:10Z" | langchain/tools/base.py | self.callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
verbose=verbose_,
color=start_color,
**kwargs,
)
try:
args, kwargs = _to_args_and_kwargs(tool_input)
observation = await self._arun(*args, **kwargs)
except (Exception, KeyboardInterrupt) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_tool_error(e, verbose=verbose_)
else:
self.callback_manager.on_tool_error(e, verbose=verbose_)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_tool_end(
observation, verbose=verbose_, color=color, name=self.name, **kwargs
)
else:
self.callback_manager.on_tool_end(
observation, verbose=verbose_, color=color, name=self.name, **kwargs
)
return observation
def __call__(self, tool_input: str) -> str:
"""Make tool callable."""
return self.run(tool_input) |
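One detail in the `run()` chunks above lines up with issue 3157: the local name `kwargs` — which arrives carrying the agent's logging extras such as `observation_prefix` and `llm_prefix` — is reassigned by `args, kwargs = _to_args_and_kwargs(tool_input)` before `on_tool_end` is called, so those prefixes never reach the console callback. That reading is an inference from the snapshot, not a statement about the merged fix. A self-contained toy illustration of the shadowing:

```
# Toy reproduction of the kwargs shadowing (hypothetical names; no LangChain
# imports needed). The second print shows the logging kwargs have been lost.
def fake_run(tool_input, **kwargs):
    print("received:", kwargs)            # includes observation_prefix / llm_prefix
    args, kwargs = (tool_input,), {}      # mirrors `_to_args_and_kwargs(tool_input)`
    print("forwarded to on_tool_end:", kwargs)  # {} -> no "Observation:" / "Thought:"


fake_run("SELECT 1", observation_prefix="Observation: ", llm_prefix="Thought:")
```

A fix along these lines keeps the caller's kwargs under a separate local name (for example `tool_args, tool_kwargs`) when unpacking `tool_input`.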
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (although wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Wrapper around llama.cpp."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class LlamaCpp(LLM): |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (although wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Wrapper around the llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.llms import LlamaCppEmbeddings
llm = LlamaCppEmbeddings(model_path="/path/to/llama/model")
"""
client: Any
model_path: str
"""The path to the Llama model file."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights.""" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (although wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (allthough wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
n_ctx = values["n_ctx"]
n_parts = values["n_parts"]
seed = values["seed"]
f16_kv = values["f16_kv"]
logits_all = values["logits_all"]
vocab_only = values["vocab_only"] |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (allthough wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | use_mlock = values["use_mlock"]
n_threads = values["n_threads"]
n_batch = values["n_batch"]
last_n_tokens_size = values["last_n_tokens_size"]
try:
from llama_cpp import Llama
values["client"] = Llama(
model_path=model_path,
n_ctx=n_ctx,
n_parts=n_parts,
seed=seed,
f16_kv=f16_kv,
logits_all=logits_all,
vocab_only=vocab_only,
use_mlock=use_mlock,
n_threads=n_threads,
n_batch=n_batch,
last_n_tokens_size=last_n_tokens_size,
)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception:
raise NameError(f"Could not load Llama model from path: {model_path}")
return values
@property
def _default_params(self) -> Dict[str, Any]: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (allthough wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Get the default parameters for calling llama_cpp."""
return {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop,
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llama.cpp"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,301 | Output using llamacpp is garbage | Hi there,
Trying to set up LangChain with llama.cpp as a first step toward using LangChain offline:
`from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="../llama/models/ggml-vicuna-13b-4bit-rev1.bin")
text = "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step."
print(llm(text))`
The result is:
`Plenement that whciation - if a praged and as Work 1 -- but a nice bagingrading per 1, In Homewooded ETenscent is the 0sm toth, ECORO Efph at as an outs! ce, found unprint this a PC, Thom. The RxR-1 dot emD In Not OslKNOT
The Home On-a-a-a-aEOEfa-a-aP E. NOT, hotness of-aEF and Life in better-A (resondri Euler, rsa! Home WI Retection and O no-aL25 1 fate to Hosp doubate, p. T, this guiltEisenR-getus WEFI, duro as these disksada Tl.Eis-aRDA* plantly-aRing the Prospecttypen`
Running the same question using llama_cpp_python with the same model bin file, the result is (allthough wrong, correctly formatted):
`{
"id": "cmpl-d64b69f6-cd50-41e9-8d1c-25b1a5859fac",
"object": "text_completion",
"created": 1682085552,
"model": "./models/ggml-alpaca-7b-native-q4.bin",
"choices": [
{
"text": "Question: What NFL team won the Super Bowl in the year Justin Bieber was born? Answer: Let's think step by step. Justin was born in 1985, so he was born in the same year as the Super Bowl victory of the Chicago Bears in 1986. So, the answer is the Chicago Bears!",
"index": 0,
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 32,
"completion_tokens": 45,
"total_tokens": 77
}
}`
What could be the issue, encoding/decoding? | https://github.com/langchain-ai/langchain/issues/3301 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-21T14:01:59Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | The generated text.
Example:
.. code-block:: python
from langchain.llms import LlamaCppEmbeddings
llm = LlamaCppEmbeddings(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
params = self._default_params
if self.stop and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop:
params["stop_sequences"] = self.stop
elif stop:
params["stop_sequences"] = stop
else:
params["stop_sequences"] = []
"""Call the Llama model and return the output."""
text = self.client(
prompt=prompt,
max_tokens=params["max_tokens"],
temperature=params["temperature"],
top_p=params["top_p"],
logprobs=params["logprobs"],
echo=params["echo"],
stop=params["stop_sequences"],
repeat_penalty=params["repeat_penalty"],
top_k=params["top_k"],
)
return text["choices"][0]["text"] |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Wrapper around llama.cpp."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class LlamaCpp(LLM): |
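On the "no answer" part of issue 3241: `LLMChain.run` returns the completion as a plain string rather than printing it, so in a script nothing appears unless the value is printed or logged. A minimal adjustment to the reporter's snippet (names as in the issue):

```
# llm_chain and question as defined in the issue's snippet; run() returns str.
answer = llm_chain.run(question)
print(answer)
```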
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Wrapper around the llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.llms import LlamaCppEmbeddings
llm = LlamaCppEmbeddings(model_path="/path/to/llama/model")
"""
client: Any
model_path: str
"""The path to the Llama model file."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights.""" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
n_ctx = values["n_ctx"]
n_parts = values["n_parts"]
seed = values["seed"]
f16_kv = values["f16_kv"]
logits_all = values["logits_all"]
vocab_only = values["vocab_only"] |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | use_mlock = values["use_mlock"]
n_threads = values["n_threads"]
n_batch = values["n_batch"]
last_n_tokens_size = values["last_n_tokens_size"]
try:
from llama_cpp import Llama
values["client"] = Llama(
model_path=model_path,
n_ctx=n_ctx,
n_parts=n_parts,
seed=seed,
f16_kv=f16_kv,
logits_all=logits_all,
vocab_only=vocab_only,
use_mlock=use_mlock,
n_threads=n_threads,
n_batch=n_batch,
last_n_tokens_size=last_n_tokens_size,
)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception:
raise NameError(f"Could not load Llama model from path: {model_path}")
return values
@property
def _default_params(self) -> Dict[str, Any]: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | """Get the default parameters for calling llama_cpp."""
return {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop,
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llama.cpp"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,241 | llama.cpp => model runs fine but bad output | Hi,
Windows 11 environment
Python: 3.10.11
I installed
- llama-cpp-python and it works fine and provides output
- transformers
- pytorch
Code run:
```
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = LlamaCpp(model_path=r"D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is the capital of Belgium?"
llm_chain.run(question)
```
Output:
```
llama.cpp: loading model from D:\Win10User\Downloads\AI\Model\vicuna-13B-1.1-GPTQ-4bit-128g.GGML.bin
llama_model_load_internal: format = ggjt v1 (latest)
llama_model_load_internal: n_vocab = 32000
llama_model_load_internal: n_ctx = 512
llama_model_load_internal: n_embd = 5120
llama_model_load_internal: n_mult = 256
llama_model_load_internal: n_head = 40
llama_model_load_internal: n_layer = 40
llama_model_load_internal: n_rot = 128
llama_model_load_internal: ftype = 4 (mostly Q4_1, some F16)
llama_model_load_internal: n_ff = 13824
llama_model_load_internal: n_parts = 1
llama_model_load_internal: model size = 13B
llama_model_load_internal: ggml ctx size = 73.73 KB
llama_model_load_internal: mem required = 11749.65 MB (+ 3216.00 MB per state)
llama_init_from_file: kv self size = 800.00 MB
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
llama_print_timings: load time = 2154.68 ms
llama_print_timings: sample time = 75.88 ms / 256 runs ( 0.30 ms per run)
llama_print_timings: prompt eval time = 5060.58 ms / 23 tokens ( 220.03 ms per token)
llama_print_timings: eval time = 72461.40 ms / 255 runs ( 284.16 ms per run)
llama_print_timings: total time = 77664.50 ms
```
But there is no answer to the question.... Am I supposed to Print() something?
| https://github.com/langchain-ai/langchain/issues/3241 | https://github.com/langchain-ai/langchain/pull/3320 | 3a1bdce3f51e302d468807e980455d676c0f5fd6 | 77bb6c99f7ee189ce3734c47b27e70dc237bbce7 | "2023-04-20T20:36:45Z" | python | "2023-04-23T01:46:55Z" | langchain/llms/llamacpp.py | The generated text.
Example:
.. code-block:: python
from langchain.llms import LlamaCppEmbeddings
llm = LlamaCppEmbeddings(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
params = self._default_params
if self.stop and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop:
params["stop_sequences"] = self.stop
elif stop:
params["stop_sequences"] = stop
else:
params["stop_sequences"] = []
"""Call the Llama model and return the output."""
text = self.client(
prompt=prompt,
max_tokens=params["max_tokens"],
temperature=params["temperature"],
top_p=params["top_p"],
logprobs=params["logprobs"],
echo=params["echo"],
stop=params["stop_sequences"],
repeat_penalty=params["repeat_penalty"],
top_k=params["top_k"],
)
return text["choices"][0]["text"] |
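Since `_call` above forwards an optional `stop` list to llama.cpp, a small usage sketch of cutting generation off at a known boundary (again with a placeholder model path):
```python
from langchain.llms import LlamaCpp

# Placeholder path -- substitute a local GGML model file.
llm = LlamaCpp(model_path="./models/ggml-vicuna-13b.bin", temperature=0.1)

# The stop list is passed straight through to llama.cpp, so generation halts
# as soon as the model tries to start a new "Question:" block.
text = llm("Question: What is the capital of Belgium?\nAnswer:", stop=["Question:"])
print(text.strip())
```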
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,404 | marathon_times.ipynb: mismatched text and code | Text mentions inflation and tuition:
Here is the prompt comparing inflation and college tuition.
Code is about marathon times:
agent.run(["What were the winning boston marathon times for the past 5 years? Generate a table of the names, countries of origin, and times."]) | https://github.com/langchain-ai/langchain/issues/3404 | https://github.com/langchain-ai/langchain/pull/3408 | b4de839ed8a1bea7425a6923b2cd635068b6015a | 73bc70b4fa7bb69647d9dbe81943b88ce6ccc180 | "2023-04-23T21:06:49Z" | python | "2023-04-24T01:14:11Z" | langchain/tools/ddg_search/__init__.py | """DuckDuckGo Search API toolkit.""" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,384 | ValueError in cosine_similarity when using FAISS index as vector store | Getting the below error
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...\langchain\vectorstores\faiss.py", line 285, in max_marginal_relevance_search
docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k)
File "...\langchain\vectorstores\faiss.py", line 248, in max_marginal_relevance_search_by_vector
mmr_selected = maximal_marginal_relevance(
File "...\langchain\langchain\vectorstores\utils.py", line 19, in maximal_marginal_relevance
similarity_to_query = cosine_similarity([query_embedding], embedding_list)[0]
File "...\langchain\langchain\math_utils.py", line 16, in cosine_similarity
raise ValueError("Number of columns in X and Y must be the same.")
ValueError: Number of columns in X and Y must be the same.
```
Code to reproduce this error
```
>>> model_name = "sentence-transformers/all-mpnet-base-v2"
>>> model_kwargs = {'device': 'cpu'}
>>> from langchain.embeddings import HuggingFaceEmbeddings
>>> embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
>>> from langchain.vectorstores import FAISS
>>> FAISS_INDEX_PATH = 'faiss_index'
>>> db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
>>> query = 'query'
>>> results = db.max_marginal_relevance_search(query)
```
While going through the error it seems that in this case `query_embedding` is 1 x model_dimension while embedding_list is no_docs x model dimension vectors. Hence we should probably change the code to `similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0]` i.e. remove the list from the query_embedding.
Since this is a common function not sure if this change would affect other embedding classes as well. | https://github.com/langchain-ai/langchain/issues/3384 | https://github.com/langchain-ai/langchain/pull/3475 | 53b14de636080e09e128d829aafa9ea34ac34a94 | b2564a63911f8a77272ac9e93e5558384f00155c | "2023-04-23T07:51:56Z" | python | "2023-04-25T02:54:15Z" | langchain/math_utils.py | """Math utils."""
from typing import List, Union
import numpy as np
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Number of columns in X and Y must be the same.")
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity |
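To make the shape argument from the issue above concrete, here is a small self-contained check (the dimension is made up) of how an extra level of nesting around an already 1 x d query embedding trips the column check in `cosine_similarity`:
```python
import numpy as np
from langchain.math_utils import cosine_similarity

d = 8                                    # stand-in for the real model dimension
query_embedding = np.random.rand(1, d)   # already a single 1 x d row
embedding_list = np.random.rand(5, d)    # five document embeddings

try:
    # wrapping again yields shape (1, 1, d), so X has 1 "column" vs. d in Y
    cosine_similarity([query_embedding], embedding_list)
except ValueError as err:
    print(err)          # Number of columns in X and Y must be the same.

# Passing the row matrix directly keeps both arguments at width d
sims = cosine_similarity(query_embedding, embedding_list)
print(sims.shape)       # (1, 5)
```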
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,384 | ValueError in cosine_similarity when using FAISS index as vector store | Getting the below error
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...\langchain\vectorstores\faiss.py", line 285, in max_marginal_relevance_search
docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k)
File "...\langchain\vectorstores\faiss.py", line 248, in max_marginal_relevance_search_by_vector
mmr_selected = maximal_marginal_relevance(
File "...\langchain\langchain\vectorstores\utils.py", line 19, in maximal_marginal_relevance
similarity_to_query = cosine_similarity([query_embedding], embedding_list)[0]
File "...\langchain\langchain\math_utils.py", line 16, in cosine_similarity
raise ValueError("Number of columns in X and Y must be the same.")
ValueError: Number of columns in X and Y must be the same.
```
Code to reproduce this error
```
>>> model_name = "sentence-transformers/all-mpnet-base-v2"
>>> model_kwargs = {'device': 'cpu'}
>>> from langchain.embeddings import HuggingFaceEmbeddings
>>> embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
>>> from langchain.vectorstores import FAISS
>>> FAISS_INDEX_PATH = 'faiss_index'
>>> db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
>>> query = 'query'
>>> results = db.max_marginal_relevance_search(query)
```
While going through the error it seems that in this case `query_embedding` is 1 x model_dimension while embedding_list is no_docs x model dimension vectors. Hence we should probably change the code to `similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0]` i.e. remove the list from the query_embedding.
Since this is a common function not sure if this change would affect other embedding classes as well. | https://github.com/langchain-ai/langchain/issues/3384 | https://github.com/langchain-ai/langchain/pull/3475 | 53b14de636080e09e128d829aafa9ea34ac34a94 | b2564a63911f8a77272ac9e93e5558384f00155c | "2023-04-23T07:51:56Z" | python | "2023-04-25T02:54:15Z" | langchain/vectorstores/utils.py | """Utility functions for working with vectors and vectorstores."""
from typing import List
import numpy as np
from langchain.math_utils import cosine_similarity
def maximal_marginal_relevance( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,384 | ValueError in cosine_similarity when using FAISS index as vector store | Getting the below error
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...\langchain\vectorstores\faiss.py", line 285, in max_marginal_relevance_search
docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k)
File "...\langchain\vectorstores\faiss.py", line 248, in max_marginal_relevance_search_by_vector
mmr_selected = maximal_marginal_relevance(
File "...\langchain\langchain\vectorstores\utils.py", line 19, in maximal_marginal_relevance
similarity_to_query = cosine_similarity([query_embedding], embedding_list)[0]
File "...\langchain\langchain\math_utils.py", line 16, in cosine_similarity
raise ValueError("Number of columns in X and Y must be the same.")
ValueError: Number of columns in X and Y must be the same.
```
Code to reproduce this error
```
>>> model_name = "sentence-transformers/all-mpnet-base-v2"
>>> model_kwargs = {'device': 'cpu'}
>>> from langchain.embeddings import HuggingFaceEmbeddings
>>> embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
>>> from langchain.vectorstores import FAISS
>>> FAISS_INDEX_PATH = 'faiss_index'
>>> db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
>>> query = 'query'
>>> results = db.max_marginal_relevance_search(query)
```
While going through the error it seems that in this case `query_embedding` is 1 x model_dimension while embedding_list is no_docs x model dimension vectors. Hence we should probably change the code to `similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0]` i.e. remove the list from the query_embedding.
Since this is a common function not sure if this change would affect other embedding classes as well. | https://github.com/langchain-ai/langchain/issues/3384 | https://github.com/langchain-ai/langchain/pull/3475 | 53b14de636080e09e128d829aafa9ea34ac34a94 | b2564a63911f8a77272ac9e93e5558384f00155c | "2023-04-23T07:51:56Z" | python | "2023-04-25T02:54:15Z" | langchain/vectorstores/utils.py | query_embedding: np.ndarray,
embedding_list: list,
lambda_mult: float = 0.5,
k: int = 4,
) -> List[int]:
"""Calculate maximal marginal relevance."""
if min(k, len(embedding_list)) <= 0:
return []
similarity_to_query = cosine_similarity([query_embedding], embedding_list)[0]
most_similar = int(np.argmax(similarity_to_query))
idxs = [most_similar]
selected = np.array([embedding_list[most_similar]])
while len(idxs) < min(k, len(embedding_list)):
best_score = -np.inf
idx_to_add = -1
similarity_to_selected = cosine_similarity(embedding_list, selected)
for i, query_score in enumerate(similarity_to_query):
if i in idxs:
continue
redundant_score = max(similarity_to_selected[i])
equation_score = (
lambda_mult * query_score - (1 - lambda_mult) * redundant_score
)
if equation_score > best_score:
best_score = equation_score
idx_to_add = i
idxs.append(idx_to_add)
selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
return idxs |
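For completeness, a quick usage sketch of `maximal_marginal_relevance` with toy vectors (sizes are arbitrary); the return value is a list of indices into the candidate list:
```python
import numpy as np
from langchain.vectorstores.utils import maximal_marginal_relevance

rng = np.random.default_rng(0)
query = rng.normal(size=8)                             # toy query embedding
candidates = [rng.normal(size=8) for _ in range(20)]   # toy document embeddings

# lambda_mult trades relevance (towards 1.0) against diversity (towards 0.0)
picked = maximal_marginal_relevance(query, candidates, lambda_mult=0.5, k=4)
print(picked)   # four indices into `candidates`, most similar pick first
```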
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
def _format_index(index: dict) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: Optional[bool] = False,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._metadata = metadata or MetaData()
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=self._usable_tables,
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@property
def dialect(self) -> str: |
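For reference, a minimal usage sketch of the wrapper above (the SQLite URI is a placeholder for any SQLAlchemy-compatible URL):
```python
from langchain.sql_database import SQLDatabase

# Placeholder URI -- any database URL that SQLAlchemy's create_engine accepts.
db = SQLDatabase.from_uri("sqlite:///example.db", sample_rows_in_table_info=2)

print(db.dialect)                   # e.g. "sqlite"
print(db.get_usable_table_names())  # tables exposed to the chains
print(db.run("SELECT 1"))           # query results come back as a string
```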
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | """Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
return self._all_tables - self._ignore_tables
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database") |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
command = select([table]).limit(self._sample_rows_in_table_info)
columns_str = "\t".join([col.name for col in table.columns])
try:
with self._engine.connect() as connection:
sample_rows = connection.execute(command)
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows)
)
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def run(self, command: str, fetch: str = "all") -> str: |
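One concrete incompatibility that surfaces when moving to SQLAlchemy 2.x is the legacy `select([table])` call in `_get_sample_rows` above: 2.x only accepts the selectable positionally, while the 2.0-style call also works on 1.4. A sketch of the difference against a throwaway in-memory database:
```python
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, select

engine = create_engine("sqlite:///:memory:")   # throwaway in-memory database
metadata = MetaData()
users = Table(
    "users",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
metadata.create_all(engine)

# Legacy 1.x form, removed in SQLAlchemy 2.0:  select([users]).limit(3)
# 2.0 form, accepted by both 1.4 and 2.0:
stmt = select(users).limit(3)
with engine.connect() as conn:
    rows = conn.execute(stmt).fetchall()
print(rows)   # [] here, since no rows were inserted -- the point is the API shape
```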
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | """Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
result = cursor.fetchone()[0]
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
return str(result)
return ""
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 1,766 | Update poetry lock to allow SQLAlchemy v2 | It seems that #1578 adds support for SQLAlchemy v2 but the [poetry lock file](https://github.com/hwchase17/langchain/blob/8685d53adcdd0310e76349ecb4e2b87f980c4673/poetry.lock#L6211) is still at 1.4.46. | https://github.com/langchain-ai/langchain/issues/1766 | https://github.com/langchain-ai/langchain/pull/3310 | 7c2c73af5f15799c9326e99ed15c4a30fd19ad11 | b7658059643cd2f8fa58a2132b7d723638445ebc | "2023-03-19T01:48:23Z" | python | "2023-04-25T04:10:56Z" | langchain/sql_database.py | """Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}" |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | """Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, Type
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
def _create_weaviate_client(**kwargs: Any) -> Any: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | client = kwargs.get("client")
if client is not None:
return client
weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL")
weaviate_api_key = get_from_dict_or_env(
kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None
)
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip instal weaviate-client`"
)
auth = (
weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
if weaviate_api_key is not None
else None
)
client = weaviate.Client(weaviate_url, auth_client_secret=auth)
return client
class Weaviate(VectorStore):
"""Wrapper around Weaviate vector database.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
def add_texts( |
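A short usage sketch of the constructor above with a client-side embedding object (the local URL, class name, and pre-populated index are placeholder assumptions); the `embedding` argument is what the class's `max_marginal_relevance_search` relies on to embed the query without any server-side OpenAI module:
```python
import weaviate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Weaviate

# Placeholders: a locally running Weaviate instance whose "LangChain" class
# already holds documents with vectors of matching dimensionality.
client = weaviate.Client(url="http://localhost:8080")
embeddings = HuggingFaceEmbeddings()   # embeds the query locally, no OpenAI key

store = Weaviate(client, "LangChain", "text", embedding=embeddings,
                 attributes=["source"])

# Without the embedding argument this call raises
# "max_marginal_relevance_search requires a suitable Embeddings object".
docs = store.max_marginal_relevance_search("What does the document say?", k=2)
for doc in docs:
    print(doc.metadata.get("source"), doc.page_content[:80])
```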
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
batch.add_data_object(
data_object=data_properties, class_name=self._index_name, uuid=_id
)
ids.append(_id)
return ids
def similarity_search( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
result = query_obj.with_near_text(content).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def similarity_search_by_vector( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4. |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
"max_marginal_relevance_search requires a suitable Embeddings object"
)
return self.max_marginal_relevance_search_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to. |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check if the embeddings parameter was provided during initialization and perform the necessary actions, you can modify the add_texts method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
results = (
query_obj.with_additional("vector")
.with_near_vector(vector)
.with_limit(fetch_k)
.do()
)
payload = results["data"]["Get"][self._index_name]
embeddings = [result["_additional"]["vector"] for result in payload]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
docs = []
for idx in mmr_selected:
text = payload[idx].pop(self._text_key)
payload[idx].pop("_additional")
meta = payload[idx]
docs.append(Document(page_content=text, metadata=meta)) |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the __init__ method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the add_texts method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | return docs
@classmethod
def from_texts(
cls: Type[Weaviate],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores.weaviate import Weaviate
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
client = _create_weaviate_client(**kwargs)
from weaviate.util import get_valid_uuid
index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}")
embeddings = embedding.embed_documents(texts) if embedding else None |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | langchain/vectorstores/weaviate.py | text_key = "text"
schema = _default_schema(index_name)
attributes = list(metadatas[0].keys()) if metadatas else None
if not client.schema.contains(schema):
client.schema.create_class(schema)
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {
text_key: text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(client, index_name, text_key, embedding, attributes) |
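Given the `from_texts` implementation above, which calls `embedding.embed_documents(texts)` client-side and hands the vectors to Weaviate explicitly, the goal stated in the issue can also be reached through the convenience constructor. The embedding model and URL below are illustrative assumptions.
```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.weaviate import Weaviate

texts = ["foo", "bar", "baz"]
docsearch = Weaviate.from_texts(
    texts,
    HuggingFaceEmbeddings(),  # embeddings computed locally, not by the Weaviate server
    metadatas=[{"page": i} for i in range(len(texts))],
    weaviate_url="http://localhost:8080",
)
```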
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | tests/integration_tests/vectorstores/test_weaviate.py | """Test Weaviate functionality."""
import logging
import os
from typing import Generator, Union
import pytest
from weaviate import Client
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.weaviate import Weaviate
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
class TestWeaviate:
@classmethod
def setup_class(cls) -> None:
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY environment variable is not set")
@pytest.fixture(scope="class", autouse=True)
def weaviate_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the weaviate url."""
url = "http://localhost:8080"
yield url
client = Client(url)
client.schema.delete_all()
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_without_metadata( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | tests/integration_tests/vectorstores/test_weaviate.py | self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = Weaviate.from_texts(
texts,
embedding_openai,
weaviate_url=weaviate_url,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata_and_filter( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | tests/integration_tests/vectorstores/test_weaviate.py | self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
output = docsearch.similarity_search(
"foo",
k=2,
where_filter={"path": ["page"], "operator": "Equal", "valueNumber": 0},
)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
standard_ranking = docsearch.similarity_search("foo", k=2)
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=1.0
)
assert output == standard_ranking |
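The test above compares `lambda_mult=1.0` against the plain similarity ranking; the continuation in the next chunk checks `lambda_mult=0.0`. As a reference sketch of the underlying trade-off, the helper used by the Weaviate store can be called directly (the toy vectors below are assumptions for illustration).
```python
import numpy as np

from langchain.vectorstores.utils import maximal_marginal_relevance

query = np.array([1.0, 0.0])
candidates = [[1.0, 0.0], [0.99, 0.1], [0.0, 1.0]]

# lambda_mult=1.0 ranks purely by similarity to the query;
# lambda_mult=0.0 pushes for maximum diversity among the selected candidates.
print(maximal_marginal_relevance(query, candidates, k=2, lambda_mult=1.0))
print(maximal_marginal_relevance(query, candidates, k=2, lambda_mult=0.0))
```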
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | tests/integration_tests/vectorstores/test_weaviate.py | output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=0.0
)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_by_vector(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search by vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
foo_embedding = embedding_openai.embed_query("foo")
standard_ranking = docsearch.similarity_search("foo", k=2)
output = docsearch.max_marginal_relevance_search_by_vector(
foo_embedding, k=2, fetch_k=3, lambda_mult=1.0
)
assert output == standard_ranking
output = docsearch.max_marginal_relevance_search_by_vector(
foo_embedding, k=2, fetch_k=3, lambda_mult=0.0
)
assert output == [ |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 2,695 | Allow Weaviate initialization with alternative embedding implementation | I would like to provide an 'embeddings' parameter for the initialization of the Weaviate vector store, as I do not want to start the Weaviate server with the OpenAI key in order to make use of embeddings through the Azure OpenAI Service.
The addition of the embeddings parameter affects the `__init__` method, as shown in the code snippet below. To accommodate this change, you'll also need to modify the `add_texts` method.
```python
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding_function: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._embedding_function = embedding_function
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
```
To check whether an embedding function was provided at initialization and, if so, compute each vector client-side and pass it to Weaviate explicitly, you can modify the `add_texts` method in the following way:
```python
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(doc))
batch.add_data_object(data_properties, self._index_name, _id, vector=embeddings[0])
else:
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
``` | https://github.com/langchain-ai/langchain/issues/2695 | https://github.com/langchain-ai/langchain/pull/3608 | 615812581ea3175b3ae9ec59036008d013052396 | 440c98e24bf3f18c132694309872592ef550e1bc | "2023-04-11T05:19:00Z" | python | "2023-04-27T04:45:03Z" | tests/integration_tests/vectorstores/test_weaviate.py | Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_with_filter(
self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(
texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
)
where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0}
standard_ranking = docsearch.similarity_search(
"foo", k=2, where_filter=where_filter
)
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=1.0, where_filter=where_filter
)
assert output == standard_ranking
output = docsearch.max_marginal_relevance_search(
"foo", k=2, fetch_k=3, lambda_mult=0.0, where_filter=where_filter
)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
] |
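The tests above rely on an `embedding_openai` fixture that is not defined in this excerpt; it is presumably provided by a `conftest.py`. A minimal stand-in might look like the sketch below (its scope and placement are assumptions).
```python
# Hypothetical conftest.py content, shown only so the tests above read as self-contained.
import pytest

from langchain.embeddings.openai import OpenAIEmbeddings


@pytest.fixture
def embedding_openai() -> OpenAIEmbeddings:
    # Requires OPENAI_API_KEY, which TestWeaviate.setup_class already checks for.
    return OpenAIEmbeddings()
```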
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,664 | import error when importing `from langchain import OpenAI` on 0.0.151 | got the following error when running today:
``` File "venv/lib/python3.11/site-packages/langchain/__init__.py", line 6, in <module>
from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
File "venv/lib/python3.11/site-packages/langchain/agents/__init__.py", line 2, in <module>
from langchain.agents.agent import (
File "venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 17, in <module>
from langchain.chains.base import Chain
File "venv/lib/python3.11/site-packages/langchain/chains/__init__.py", line 2, in <module>
from langchain.chains.api.base import APIChain
File "venv/lib/python3.11/site-packages/langchain/chains/api/base.py", line 8, in <module>
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
File "venv/lib/python3.11/site-packages/langchain/chains/api/prompt.py", line 2, in <module>
from langchain.prompts.prompt import PromptTemplate
File "venv/lib/python3.11/site-packages/langchain/prompts/__init__.py", line 14, in <module>
from langchain.prompts.loading import load_prompt
File "venv/lib/python3.11/site-packages/langchain/prompts/loading.py", line 14, in <module>
from langchain.utilities.loading import try_load_from_hub
File "venv/lib/python3.11/site-packages/langchain/utilities/__init__.py", line 5, in <module>
from langchain.utilities.bash import BashProcess
File "venv/lib/python3.11/site-packages/langchain/utilities/bash.py", line 7, in <module>
import pexpect
ModuleNotFoundError: No module named 'pexpect'
```
does this need to be added to project dependencies? | https://github.com/langchain-ai/langchain/issues/3664 | https://github.com/langchain-ai/langchain/pull/3667 | 708787dddb2fa3cdb2d1dabefa00c01ffec572f6 | 1b5721c999c9fc310cefec383666f43c80ec9620 | "2023-04-27T16:24:30Z" | python | "2023-04-27T18:39:01Z" | langchain/utilities/bash.py | """Wrapper around subprocess to run commands."""
import re
import subprocess
from typing import List, Union
from uuid import uuid4
import pexpect
class BashProcess: |
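The traceback in the issue comes from the module-level `import pexpect` shown in this chunk: importing `langchain` pulls in `langchain.utilities.bash`, which fails when `pexpect` is absent. The immediate workaround is `pip install pexpect`. One way to remove the hard dependency is to defer the import until a persistent session is actually requested; the sketch below is an illustration, not necessarily the change made in the linked pull request.
```python
def _lazy_import_pexpect():
    """Import pexpect only when BashProcess(persistent=True) actually needs it."""
    try:
        import pexpect
    except ImportError:
        raise ImportError(
            "pexpect is required for persistent bash processes."
            " Install it with `pip install pexpect`."
        )
    return pexpect
```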
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,664 | import error when importing `from langchain import OpenAI` on 0.0.151 | got the following error when running today:
``` File "venv/lib/python3.11/site-packages/langchain/__init__.py", line 6, in <module>
from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
File "venv/lib/python3.11/site-packages/langchain/agents/__init__.py", line 2, in <module>
from langchain.agents.agent import (
File "venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 17, in <module>
from langchain.chains.base import Chain
File "venv/lib/python3.11/site-packages/langchain/chains/__init__.py", line 2, in <module>
from langchain.chains.api.base import APIChain
File "venv/lib/python3.11/site-packages/langchain/chains/api/base.py", line 8, in <module>
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
File "venv/lib/python3.11/site-packages/langchain/chains/api/prompt.py", line 2, in <module>
from langchain.prompts.prompt import PromptTemplate
File "venv/lib/python3.11/site-packages/langchain/prompts/__init__.py", line 14, in <module>
from langchain.prompts.loading import load_prompt
File "venv/lib/python3.11/site-packages/langchain/prompts/loading.py", line 14, in <module>
from langchain.utilities.loading import try_load_from_hub
File "venv/lib/python3.11/site-packages/langchain/utilities/__init__.py", line 5, in <module>
from langchain.utilities.bash import BashProcess
File "venv/lib/python3.11/site-packages/langchain/utilities/bash.py", line 7, in <module>
import pexpect
ModuleNotFoundError: No module named 'pexpect'
```
does this need to be added to project dependencies? | https://github.com/langchain-ai/langchain/issues/3664 | https://github.com/langchain-ai/langchain/pull/3667 | 708787dddb2fa3cdb2d1dabefa00c01ffec572f6 | 1b5721c999c9fc310cefec383666f43c80ec9620 | "2023-04-27T16:24:30Z" | python | "2023-04-27T18:39:01Z" | langchain/utilities/bash.py | """Executes bash commands and returns the output."""
def __init__(
self,
strip_newlines: bool = False,
return_err_output: bool = False,
persistent: bool = False,
):
"""Initialize with stripping newlines."""
self.strip_newlines = strip_newlines
self.return_err_output = return_err_output
self.prompt = ""
self.process = None
if persistent:
self.prompt = str(uuid4())
self.process = self._initialize_persistent_process(self.prompt)
@staticmethod
def _initialize_persistent_process(prompt: str) -> pexpect.spawn:
process = pexpect.spawn(
"env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8"
)
process.sendline("PS1=" + prompt)
process.expect_exact(prompt, timeout=10)
return process
def run(self, commands: Union[str, List[str]]) -> str: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,664 | import error when importing `from langchain import OpenAI` on 0.0.151 | got the following error when running today:
``` File "venv/lib/python3.11/site-packages/langchain/__init__.py", line 6, in <module>
from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
File "venv/lib/python3.11/site-packages/langchain/agents/__init__.py", line 2, in <module>
from langchain.agents.agent import (
File "venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 17, in <module>
from langchain.chains.base import Chain
File "venv/lib/python3.11/site-packages/langchain/chains/__init__.py", line 2, in <module>
from langchain.chains.api.base import APIChain
File "venv/lib/python3.11/site-packages/langchain/chains/api/base.py", line 8, in <module>
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
File "venv/lib/python3.11/site-packages/langchain/chains/api/prompt.py", line 2, in <module>
from langchain.prompts.prompt import PromptTemplate
File "venv/lib/python3.11/site-packages/langchain/prompts/__init__.py", line 14, in <module>
from langchain.prompts.loading import load_prompt
File "venv/lib/python3.11/site-packages/langchain/prompts/loading.py", line 14, in <module>
from langchain.utilities.loading import try_load_from_hub
File "venv/lib/python3.11/site-packages/langchain/utilities/__init__.py", line 5, in <module>
from langchain.utilities.bash import BashProcess
File "venv/lib/python3.11/site-packages/langchain/utilities/bash.py", line 7, in <module>
import pexpect
ModuleNotFoundError: No module named 'pexpect'
```
does this need to be added to project dependencies? | https://github.com/langchain-ai/langchain/issues/3664 | https://github.com/langchain-ai/langchain/pull/3667 | 708787dddb2fa3cdb2d1dabefa00c01ffec572f6 | 1b5721c999c9fc310cefec383666f43c80ec9620 | "2023-04-27T16:24:30Z" | python | "2023-04-27T18:39:01Z" | langchain/utilities/bash.py | """Run commands and return final output."""
if isinstance(commands, str):
commands = [commands]
commands = ";".join(commands)
if self.process is not None:
return self._run_persistent(
commands,
)
else:
return self._run(commands)
def _run(self, command: str) -> str:
"""Run commands and return final output."""
try:
output = subprocess.run(
command,
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).stdout.decode()
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode()
return str(error)
if self.strip_newlines:
output = output.strip()
return output
def process_output(self, output: str, command: str) -> str: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,664 | import error when importing `from langchain import OpenAI` on 0.0.151 | got the following error when running today:
``` File "venv/lib/python3.11/site-packages/langchain/__init__.py", line 6, in <module>
from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
File "venv/lib/python3.11/site-packages/langchain/agents/__init__.py", line 2, in <module>
from langchain.agents.agent import (
File "venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 17, in <module>
from langchain.chains.base import Chain
File "venv/lib/python3.11/site-packages/langchain/chains/__init__.py", line 2, in <module>
from langchain.chains.api.base import APIChain
File "venv/lib/python3.11/site-packages/langchain/chains/api/base.py", line 8, in <module>
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
File "venv/lib/python3.11/site-packages/langchain/chains/api/prompt.py", line 2, in <module>
from langchain.prompts.prompt import PromptTemplate
File "venv/lib/python3.11/site-packages/langchain/prompts/__init__.py", line 14, in <module>
from langchain.prompts.loading import load_prompt
File "venv/lib/python3.11/site-packages/langchain/prompts/loading.py", line 14, in <module>
from langchain.utilities.loading import try_load_from_hub
File "venv/lib/python3.11/site-packages/langchain/utilities/__init__.py", line 5, in <module>
from langchain.utilities.bash import BashProcess
File "venv/lib/python3.11/site-packages/langchain/utilities/bash.py", line 7, in <module>
import pexpect
ModuleNotFoundError: No module named 'pexpect'
```
does this need to be added to project dependencies? | https://github.com/langchain-ai/langchain/issues/3664 | https://github.com/langchain-ai/langchain/pull/3667 | 708787dddb2fa3cdb2d1dabefa00c01ffec572f6 | 1b5721c999c9fc310cefec383666f43c80ec9620 | "2023-04-27T16:24:30Z" | python | "2023-04-27T18:39:01Z" | langchain/utilities/bash.py | pattern = re.escape(command) + r"\s*\n"
output = re.sub(pattern, "", output, count=1)
return output.strip()
def _run_persistent(self, command: str) -> str:
"""Run commands and return final output."""
if self.process is None:
raise ValueError("Process not initialized")
self.process.sendline(command)
self.process.expect(self.prompt, timeout=10)
self.process.sendline("")
try:
self.process.expect([self.prompt, pexpect.EOF], timeout=10)
except pexpect.TIMEOUT:
return f"Timeout error while executing command {command}"
if self.process.after == pexpect.EOF:
return f"Exited with error status: {self.process.exitstatus}"
output = self.process.before
output = self.process_output(output, command)
if self.strip_newlines:
return output.strip()
return output |
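A short usage sketch of the `BashProcess` class above; the commands and directory are illustrative only.
```python
bash = BashProcess(persistent=True, strip_newlines=True)
bash.run("cd /tmp")     # state survives between calls because the same pexpect session is reused
print(bash.run("pwd"))  # expected to print /tmp

# Non-persistent mode joins the commands with ";" and runs them in one subprocess.run call.
print(BashProcess().run(["echo hello", "echo world"]))
```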
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | """Wrapper around ChromaDB embeddings platform."""
from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import xor_args
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import chromadb
import chromadb.config
logger = logging.getLogger(__name__)
def _results_to_docs(results: Any) -> List[Document]: |
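One possible way to resolve the mismatch described in the issue, sketched against the `__query_collection` shown further down in this file's chunks, is to let it forward extra keyword arguments such as `include` to `self._collection.query`. This assumes the surrounding `Chroma` class and the imports from the chunk above, and it is not necessarily the change that was ultimately merged.
```python
    @xor_args(("query_texts", "query_embeddings"))
    def __query_collection(
        self,
        query_texts: Optional[List[str]] = None,
        query_embeddings: Optional[List[List[float]]] = None,
        n_results: int = 4,
        where: Optional[Dict[str, str]] = None,
        **kwargs: Any,  # e.g. include=["metadatas", "documents", "distances", "embeddings"]
    ) -> List[Document]:
        """Query the chroma collection, forwarding any extra query kwargs."""
        for i in range(n_results, 0, -1):
            try:
                return self._collection.query(
                    query_texts=query_texts,
                    query_embeddings=query_embeddings,
                    n_results=i,
                    where=where,
                    **kwargs,
                )
            except chromadb.errors.NotEnoughElementsException:
                logger.error(
                    f"Chroma collection {self._collection.name} "
                    f"contains fewer than {i} elements."
                )
        raise chromadb.errors.NotEnoughElementsException(
            f"No documents found for Chroma collection {self._collection.name}"
        )
```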
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
class Chroma(VectorStore):
"""Wrapper around ChromaDB embeddings platform.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
) -> None:
"""Initialize with Chroma client."""
try:
import chromadb |
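For context on the `__init__` shown above, a construction sketch follows; the collection name, directory, and embedding model are assumptions for illustration.
```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

db = Chroma(
    collection_name="langchain",
    embedding_function=OpenAIEmbeddings(),
    persist_directory="./chroma_db",  # switches the client to duckdb+parquet per __init__ above
)
db.add_texts(["foo", "bar"], metadatas=[{"page": 0}, {"page": 1}])
docs = db.similarity_search("foo", k=1)
```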
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | import chromadb.config
except ImportError:
raise ValueError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client = client
else:
if client_settings:
self._client_settings = client_settings
else:
self._client_settings = chromadb.config.Settings()
if persist_directory is not None:
self._client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
)
self._client = chromadb.Client(self._client_settings)
self._embedding_function = embedding_function
self._persist_directory = persist_directory
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
@xor_args(("query_texts", "query_embeddings"))
def __query_collection( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
"""Query the chroma collection."""
for i in range(n_results, 0, -1):
try:
return self._collection.query(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=i,
where=where,
)
except chromadb.errors.NotEnoughElementsException:
logger.error(
f"Chroma collection {self._collection.name} "
f"contains fewer than {i} elements."
)
raise chromadb.errors.NotEnoughElementsException(
f"No documents found for Chroma collection {self._collection.name}"
)
def add_texts( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
self._collection.add(
metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
)
return ids
def similarity_search( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
results = self.__query_collection(
query_embeddings=embedding, n_results=k, where=filter
)
return _results_to_docs(results)
def similarity_search_with_score( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
if self._embedding_function is None:
results = self.__query_collection(
query_texts=[query], n_results=k, where=filter
)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | query_embeddings=[query_embedding], n_results=k, where=filter
)
return _results_to_docs_and_scores(results)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k, |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
results["embeddings"][0],
k=k,
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mul=lambda_mult, filter=filter
)
return docs
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | """Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
self._collection.update_document(document_id, text, metadata)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents. |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None, |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | langchain/vectorstores/chroma.py | client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
) |
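For context, a short usage sketch of the call path the issue describes: build a store with `from_texts`, then request an MMR search. The texts and collection name are arbitrary, and `OpenAIEmbeddings` stands in for any `Embeddings` implementation — MMR requires an embedding function on the store, per the `ValueError` raised above.
```
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

store = Chroma.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=OpenAIEmbeddings(),
    collection_name="mmr_demo",
)

# On the pre-fix code above this raises an unexpected keyword argument
# error for `include`; once __query_collection accepts it, k docs come back.
docs = store.max_marginal_relevance_search("foo", k=2, fetch_k=3)
```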
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | tests/integration_tests/vectorstores/test_chroma.py | """Test Chroma functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores import Chroma
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_chroma() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | tests/integration_tests/vectorstores/test_chroma.py | """Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Chroma.from_texts(
collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.asyncio
async def test_chroma_async() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Chroma.from_texts(
collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
)
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_chroma_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(
collection_name="test_collection",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_chroma_with_metadatas_with_scores() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | tests/integration_tests/vectorstores/test_chroma.py | """Test end to end construction and scored search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(
collection_name="test_collection",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_chroma_search_filter() -> None:
"""Test end to end construction and search with metadata filtering."""
texts = ["far", "bar", "baz"]
metadatas = [{"first_letter": "{}".format(text[0])} for text in texts]
docsearch = Chroma.from_texts(
collection_name="test_collection",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search("far", k=1, filter={"first_letter": "f"})
assert output == [Document(page_content="far", metadata={"first_letter": "f"})]
output = docsearch.similarity_search("far", k=1, filter={"first_letter": "b"})
assert output == [Document(page_content="bar", metadata={"first_letter": "b"})]
def test_chroma_search_filter_with_scores() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | tests/integration_tests/vectorstores/test_chroma.py | """Test end to end construction and scored search with metadata filtering."""
texts = ["far", "bar", "baz"]
metadatas = [{"first_letter": "{}".format(text[0])} for text in texts]
docsearch = Chroma.from_texts(
collection_name="test_collection",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search_with_score(
"far", k=1, filter={"first_letter": "f"}
)
assert output == [
(Document(page_content="far", metadata={"first_letter": "f"}), 0.0)
]
output = docsearch.similarity_search_with_score(
"far", k=1, filter={"first_letter": "b"}
)
assert output == [
(Document(page_content="bar", metadata={"first_letter": "b"}), 1.0)
]
def test_chroma_with_persistence() -> None: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,628 | Chroma.py max_marginal_relevance_search_by_vector method currently broken | Using MMR with Chroma currently does not work because the max_marginal_relevance_search_by_vector method calls self.__query_collection with the parameter "include:", but "include" is not an accepted parameter for __query_collection. This appears to be a regression introduced with #3372
Excerpt from max_marginal_relevance_search_by_vector method:
```
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
```
__query_collection does not accept include:
```
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
```
This results in an unexpected keyword error.
The short term fix is to use self._collection.query instead of self.__query_collection in max_marginal_relevance_search_by_vector, although that loses the protection when the user requests more records than exist in the store.
```
results = self._collection.query(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
include=["metadatas", "documents", "distances", "embeddings"],
)
``` | https://github.com/langchain-ai/langchain/issues/3628 | https://github.com/langchain-ai/langchain/pull/3897 | 3e1cb31f63b5c7147939feca7f8095377f64e145 | 245131097557b73774197b01e326206fa2a1b83a | "2023-04-27T00:21:42Z" | python | "2023-05-01T17:47:15Z" | tests/integration_tests/vectorstores/test_chroma.py | """Test end to end construction and search, with persistence."""
chroma_persist_dir = "./tests/persist_dir"
collection_name = "test_collection"
texts = ["foo", "bar", "baz"]
docsearch = Chroma.from_texts(
collection_name=collection_name,
texts=texts,
embedding=FakeEmbeddings(),
persist_directory=chroma_persist_dir,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.persist()
docsearch = Chroma(
collection_name=collection_name,
embedding_function=FakeEmbeddings(),
persist_directory=chroma_persist_dir,
)
output = docsearch.similarity_search("foo", k=1)
docsearch.delete_collection() |
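A sketch of a regression test in the same style as this file for the MMR path discussed in the issue. It assumes the `FakeEmbeddings` helper imported above, whose query vector matches the first text, so with `k=1` the MMR result should equal the plain similarity result.
```
def test_chroma_mmr() -> None:
    """Test max marginal relevance search end to end."""
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
    )
    # Fails on the pre-fix code above with an unexpected `include` keyword.
    output = docsearch.max_marginal_relevance_search("foo", k=1, fetch_k=3)
    assert output == [Document(page_content="foo")]
```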
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,988 | LangChain openAI callback doesn't allow finetuned models | Hi all!
I have an [application](https://github.com/ur-whitelab/BO-LIFT) based on langchain.
A few months ago, I used it with fine-tuned (FT) models.
We added a token usage counter later, and I haven't tried fine-tuned models again since then.
Recently we have been interested in using (FT) models again, but the callback to expose the token usage isn't accepting the model.
Minimal code to reproduce the error:
```
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
llm = OpenAI(
model_name=FT_MODEL,
temperature=0.7,
n=5,
max_tokens=64,
)
with get_openai_callback() as cb:
completion_response = llm.generate(["QUERY"])
token_usage = cb.total_tokens
```
It works fine if the model name is a basic openAI model. For instance, ```model_name="text-davinci-003"```
But when I try to use one of my FT models, I get this error:
```
Error in on_llm_end callback: Unknown model: FT_MODEL. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-completion, gpt-4-0314-completion, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-completion, gpt-4-32k-0314-completion, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, text-davinci-003, text-davinci-002, code-davinci-002
```
It works if I remove the callback and avoid token counting, but it'd be nice to have any suggestions on how to make it work.
Is there a workaround for that?
Any help is welcome!
Thanks! | https://github.com/langchain-ai/langchain/issues/3988 | https://github.com/langchain-ai/langchain/pull/4009 | aa383559999b3d6a781c62ed7f8589fef8892879 | f08a76250fe8995fb3f05bf785677070922d4b0d | "2023-05-02T18:00:22Z" | python | "2023-05-02T23:19:57Z" | langchain/callbacks/openai_info.py | """Callback Handler that prints to std out."""
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
def get_openai_model_cost_per_1k_tokens(
model_name: str, is_completion: bool = False
) -> float:
model_cost_mapping = {
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
"gpt-4-completion": 0.06,
"gpt-4-0314-completion": 0.06,
"gpt-4-32k": 0.06,
"gpt-4-32k-0314": 0.06,
"gpt-4-32k-completion": 0.12,
"gpt-4-32k-0314-completion": 0.12,
"gpt-3.5-turbo": 0.002,
"gpt-3.5-turbo-0301": 0.002,
"text-ada-001": 0.0004,
"ada": 0.0004,
"text-babbage-001": 0.0005,
"babbage": 0.0005,
"text-curie-001": 0.002,
"curie": 0.002,
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
}
cost = model_cost_mapping.get( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,988 | LangChain openAI callback doesn't allow finetuned models | Hi all!
I have an [application](https://github.com/ur-whitelab/BO-LIFT) based on langchain.
A few months ago, I used it with fine-tuned (FT) models.
We added a token usage counter later, and I haven't tried fine-tuned models again since then.
Recently we have been interested in using (FT) models again, but the callback to expose the token usage isn't accepting the model.
Minimal code to reproduce the error:
```
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
llm = OpenAI(
model_name=FT_MODEL,
temperature=0.7,
n=5,
max_tokens=64,
)
with get_openai_callback() as cb:
completion_response = llm.generate(["QUERY"])
token_usage = cb.total_tokens
```
It works fine if the model name is a basic openAI model. For instance, ```model_name="text-davinci-003"```
But when I try to use one of my FT models, I get this error:
```
Error in on_llm_end callback: Unknown model: FT_MODEL. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-completion, gpt-4-0314-completion, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-completion, gpt-4-32k-0314-completion, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, text-davinci-003, text-davinci-002, code-davinci-002
```
It works if I remove the callback and avoid token counting, but it'd be nice to have any suggestions on how to make it work.
Is there a workaround for that?
Any help is welcome!
Thanks! | https://github.com/langchain-ai/langchain/issues/3988 | https://github.com/langchain-ai/langchain/pull/4009 | aa383559999b3d6a781c62ed7f8589fef8892879 | f08a76250fe8995fb3f05bf785677070922d4b0d | "2023-05-02T18:00:22Z" | python | "2023-05-02T23:19:57Z" | langchain/callbacks/openai_info.py | model_name.lower()
+ ("-completion" if is_completion and model_name.startswith("gpt-4") else ""),
None,
)
if cost is None:
raise ValueError(
f"Unknown model: {model_name}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_cost_mapping.keys())
)
return cost
class OpenAICallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks OpenAI info."""
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
total_cost: float = 0.0
def __repr__(self) -> str:
return (
f"Tokens Used: {self.total_tokens}\n"
f"\tPrompt Tokens: {self.prompt_tokens}\n"
f"\tCompletion Tokens: {self.completion_tokens}\n"
f"Successful Requests: {self.successful_requests}\n"
f"Total Cost (USD): ${self.total_cost}"
)
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,988 | LangChain openAI callback doesn't allow finetuned models | Hi all!
I have an [application](https://github.com/ur-whitelab/BO-LIFT) based on langchain.
A few months ago, I used it with fine-tuned (FT) models.
We added a token usage counter later, and I haven't tried fine-tuned models again since then.
Recently we have been interested in using (FT) models again, but the callback to expose the token usage isn't accepting the model.
Minimal code to reproduce the error:
```
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
llm = OpenAI(
model_name=FT_MODEL,
temperature=0.7,
n=5,
max_tokens=64,
)
with get_openai_callback() as cb:
completion_response = llm.generate(["QUERY"])
token_usage = cb.total_tokens
```
It works fine if the model name is a basic openAI model. For instance, ```model_name="text-davinci-003"```
But when I try to use one of my FT models, I get this error:
```
Error in on_llm_end callback: Unknown model: FT_MODEL. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-completion, gpt-4-0314-completion, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-completion, gpt-4-32k-0314-completion, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, text-davinci-003, text-davinci-002, code-davinci-002
```
It works if I remove the callback and avoid token counting, but it'd be nice to have any suggestions on how to make it work.
Is there a workaround for that?
Any help is welcome!
Thanks! | https://github.com/langchain-ai/langchain/issues/3988 | https://github.com/langchain-ai/langchain/pull/4009 | aa383559999b3d6a781c62ed7f8589fef8892879 | f08a76250fe8995fb3f05bf785677070922d4b0d | "2023-05-02T18:00:22Z" | python | "2023-05-02T23:19:57Z" | langchain/callbacks/openai_info.py | self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
pass
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Print out the token."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
if response.llm_output is not None:
self.successful_requests += 1
if "token_usage" in response.llm_output:
token_usage = response.llm_output["token_usage"]
if "model_name" in response.llm_output:
completion_cost = get_openai_model_cost_per_1k_tokens(
response.llm_output["model_name"], is_completion=True
) * (token_usage.get("completion_tokens", 0) / 1000)
prompt_cost = get_openai_model_cost_per_1k_tokens(
response.llm_output["model_name"]
) * (token_usage.get("prompt_tokens", 0) / 1000)
self.total_cost += prompt_cost + completion_cost
if "total_tokens" in token_usage:
self.total_tokens += token_usage["total_tokens"]
if "prompt_tokens" in token_usage:
self.prompt_tokens += token_usage["prompt_tokens"]
if "completion_tokens" in token_usage:
self.completion_tokens += token_usage["completion_tokens"]
def on_llm_error( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,988 | LangChain openAI callback doesn't allow finetuned models | Hi all!
I have an [application](https://github.com/ur-whitelab/BO-LIFT) based on langchain.
A few months ago, I used it with fine-tuned (FT) models.
We added a token usage counter later, and I haven't tried fine-tuned models again since then.
Recently we have been interested in using (FT) models again, but the callback to expose the token usage isn't accepting the model.
Minimal code to reproduce the error:
```
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
llm = OpenAI(
model_name=FT_MODEL,
temperature=0.7,
n=5,
max_tokens=64,
)
with get_openai_callback() as cb:
completion_response = llm.generate(["QUERY"])
token_usage = cb.total_tokens
```
It works fine if the model name is a basic openAI model. For instance, ```model_name="text-davinci-003"```
But when I try to use one of my FT models, I get this error:
```
Error in on_llm_end callback: Unknown model: FT_MODEL. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-completion, gpt-4-0314-completion, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-completion, gpt-4-32k-0314-completion, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, text-davinci-003, text-davinci-002, code-davinci-002
```
It works if I remove the callback and avoid token counting, but it'd be nice to have any suggestions on how to make it work.
Is there a workaround for that?
Any help is welcome!
Thanks! | https://github.com/langchain-ai/langchain/issues/3988 | https://github.com/langchain-ai/langchain/pull/4009 | aa383559999b3d6a781c62ed7f8589fef8892879 | f08a76250fe8995fb3f05bf785677070922d4b0d | "2023-05-02T18:00:22Z" | python | "2023-05-02T23:19:57Z" | langchain/callbacks/openai_info.py | self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
pass
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Print out the log in specified color."""
pass
def on_tool_end( |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 3,988 | LangChain openAI callback doesn't allow finetuned models | Hi all!
I have an [application](https://github.com/ur-whitelab/BO-LIFT) based on langchain.
A few months ago, I used it with fine-tuned (FT) models.
We added a token usage counter later, and I haven't tried fine-tuned models again since then.
Recently we have been interested in using (FT) models again, but the callback to expose the token usage isn't accepting the model.
Minimal code to reproduce the error:
```
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
llm = OpenAI(
model_name=FT_MODEL,
temperature=0.7,
n=5,
max_tokens=64,
)
with get_openai_callback() as cb:
completion_response = llm.generate(["QUERY"])
token_usage = cb.total_tokens
```
It works fine if the model name is a basic openAI model. For instance, ```model_name="text-davinci-003"```
But when I try to use one of my FT models, I get this error:
```
Error in on_llm_end callback: Unknown model: FT_MODEL. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-completion, gpt-4-0314-completion, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-completion, gpt-4-32k-0314-completion, gpt-3.5-turbo, gpt-3.5-turbo-0301, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, text-davinci-003, text-davinci-002, code-davinci-002
```
It works if I remove the callback and avoid token counting, but it'd be nice to have any suggestions on how to make it work.
Is there a workaround for that?
Any help is welcome!
Thanks! | https://github.com/langchain-ai/langchain/issues/3988 | https://github.com/langchain-ai/langchain/pull/4009 | aa383559999b3d6a781c62ed7f8589fef8892879 | f08a76250fe8995fb3f05bf785677070922d4b0d | "2023-05-02T18:00:22Z" | python | "2023-05-02T23:19:57Z" | langchain/callbacks/openai_info.py | self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
pass
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
pass
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
pass
def __copy__(self) -> "OpenAICallbackHandler":
"""Return a copy of the callback handler."""
return self
def __deepcopy__(self, memo: Any) -> "OpenAICallbackHandler":
"""Return a deep copy of the callback handler."""
return self |
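One possible mitigation for the fine-tuned-model failure described in the issue above: a thin wrapper (the function name is hypothetical, and this is not the merged fix) that strips the `:ft-...` suffix fine-tuned OpenAI models typically carry and falls back to zero cost for anything still unknown, so the callback keeps counting tokens instead of raising inside `on_llm_end`.
```
def get_openai_model_cost_per_1k_tokens_lenient(
    model_name: str, is_completion: bool = False
) -> float:
    """Best-effort cost lookup that tolerates fine-tuned model names."""
    # Fine-tuned models are typically named "<base>:ft-<org>-<timestamp>".
    base_model = model_name.split(":")[0]
    try:
        return get_openai_model_cost_per_1k_tokens(base_model, is_completion)
    except ValueError:
        # Unknown model: still count tokens, just attribute no cost.
        return 0.0
```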
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Base implementation for tools or skills."""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from functools import partial
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Type, Union
from pydantic import (
BaseModel,
Extra,
Field,
create_model,
root_validator,
validate_arguments,
validator,
)
from pydantic.main import ModelMetaclass
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForToolRun,
CallbackManager,
CallbackManagerForToolRun,
Callbacks,
)
class SchemaAnnotationError(TypeError):
"""Raised when 'args_schema' is missing or has an incorrect type annotation."""
class ToolMetaclass(ModelMetaclass): |
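A sketch of the workaround from the issue above in a slightly more reusable form: instead of an inline lambda, the `functools.partial` is wrapped in a plain named function, which the tool machinery accepts. The names `foo` and `foo_with_x_bound` are illustrative only.
```
from functools import partial
from langchain.tools import Tool

def foo(x: str, y: str) -> str:
    return y

bound = partial(foo, x="bar")

def foo_with_x_bound(y: str) -> str:
    """Call foo with x pre-bound to 'bar'."""
    return bound(y=y)

tool = Tool.from_function(
    func=foo_with_x_bound,   # a plain function, so the partial check passes
    name="foo",
    description="foobar",
)
```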
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Metaclass for BaseTool to ensure the provided args_schema
doesn't silently ignored."""
def __new__(
cls: Type[ToolMetaclass], name: str, bases: Tuple[Type, ...], dct: dict
) -> ToolMetaclass:
"""Create the definition of the new tool class."""
schema_type: Optional[Type[BaseModel]] = dct.get("args_schema")
if schema_type is not None:
schema_annotations = dct.get("__annotations__", {})
args_schema_type = schema_annotations.get("args_schema", None)
if args_schema_type is None or args_schema_type == BaseModel:
typehint_mandate = """
class ChildTool(BaseTool):
...
args_schema: Type[BaseModel] = SchemaClass
..."""
raise SchemaAnnotationError(
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
f"Expected annotation of 'Type[BaseModel]'"
f" but got '{args_schema_type}'.\n"
f"Expected class looks like:\n"
f"{typehint_mandate}"
)
return super().__new__(cls, name, bases, dct)
def _create_subset_model( |
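To make the `SchemaAnnotationError` message shown above concrete, here is a small illustrative subclass (all names made up) that satisfies the metaclass check by annotating `args_schema` as `Type[BaseModel]`.
```
from typing import Type
from pydantic import BaseModel
from langchain.tools.base import BaseTool

class SearchInput(BaseModel):
    query: str

class ChildTool(BaseTool):
    name = "child_tool"
    description = "Echo the query back; exists only to show the annotation."
    # A bare `args_schema = SearchInput` (without the Type[BaseModel]
    # annotation) would raise SchemaAnnotationError at class-definition time.
    args_schema: Type[BaseModel] = SearchInput

    def _run(self, query: str) -> str:
        return query

    async def _arun(self, query: str) -> str:
        return query
```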
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | name: str, model: BaseModel, field_names: list
) -> Type[BaseModel]:
"""Create a pydantic model with only a subset of model's fields."""
fields = {
field_name: (
model.__fields__[field_name].type_,
model.__fields__[field_name].default,
)
for field_name in field_names
if field_name in model.__fields__
}
return create_model(name, **fields)
def get_filtered_args(
inferred_model: Type[BaseModel],
func: Callable,
) -> dict:
"""Get the arguments from a function's signature."""
schema = inferred_model.schema()["properties"]
valid_keys = signature(func).parameters
return {k: schema[k] for k in valid_keys if k != "run_manager"}
class _SchemaConfig: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Configuration for the pydantic model."""
extra = Extra.forbid
arbitrary_types_allowed = True
def create_schema_from_function(
model_name: str,
func: Callable,
) -> Type[BaseModel]:
"""Create a pydantic schema from a function's signature."""
validated = validate_arguments(func, config=_SchemaConfig)
inferred_model = validated.model
if "run_manager" in inferred_model.__fields__:
del inferred_model.__fields__["run_manager"]
filtered_args = get_filtered_args(inferred_model, func)
return _create_subset_model(
f"{model_name}Schema", inferred_model, list(filtered_args)
)
class BaseTool(ABC, BaseModel, metaclass=ToolMetaclass): |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Interface LangChain tools must implement."""
name: str
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
"""
args_schema: Optional[Type[BaseModel]] = None
"""Pydantic model class to validate and parse the tool's input arguments."""
return_direct: bool = False
"""Whether to return the tool's output directly. Setting this to True means
that after the tool is called, the AgentExecutor will stop looping.
"""
verbose: bool = False
"""Whether to log the tool's progress."""
callbacks: Callbacks = None
"""Callbacks to be called during tool execution."""
callback_manager: Optional[BaseCallbackManager] = None
"""Deprecated. Please use callbacks instead."""
class Config: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return len(self.args) == 1
@property
def args(self) -> dict:
if self.args_schema is not None:
return self.args_schema.schema()["properties"]
else:
schema = create_schema_from_function(self.name, self._run)
return schema.schema()["properties"]
def _parse_input(
self,
tool_input: Union[str, Dict],
) -> None:
"""Convert tool input to pydantic model."""
input_args = self.args_schema
if isinstance(tool_input, str):
if input_args is not None:
key_ = next(iter(input_args.__fields__.keys()))
input_args.validate({key_: tool_input})
else:
if input_args is not None:
input_args.validate(tool_input)
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | """Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@abstractmethod
def _run(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool.
Add run_manager: Optional[CallbackManagerForToolRun] = None
to child implementations to enable tracing,
"""
@abstractmethod
async def _arun(
self,
*args: Any,
**kwargs: Any,
) -> Any:
"""Use the tool asynchronously.
Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None
to child implementations to enable tracing,
"""
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool."""
self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, verbose=verbose_
)
new_arg_supported = signature(self._run).parameters.get("run_manager")
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description}, |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input)
observation = (
self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else self._run(*tool_args, **tool_kwargs)
)
except (Exception, KeyboardInterrupt) as e:
run_manager.on_tool_error(e)
raise e
run_manager.on_tool_end(str(observation), color=color, name=self.name, **kwargs)
return observation
async def arun(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool asynchronously."""
self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else: |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 4,053 | Tools with partials (Partial functions not yet supported in tools) | We commonly used this pattern to create tools:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=partial(foo, "bar"),
name = "foo",
description="foobar"
)
```
which as of 0.0.148 (I think) gives a pydantic error "Partial functions not yet supported in tools." We must use instead this format:
```py
from langchain.tools import Tool
from functools import partial
def foo(x, y):
return y
Tool.from_function(
func=lambda y: foo(x="bar",y=y),
name = "foo",
description="foobar"
)
```
It would be nice to again support partials. | https://github.com/langchain-ai/langchain/issues/4053 | https://github.com/langchain-ai/langchain/pull/4058 | 7e967aa4d581bec8b29e9ea44267505b0bad18b9 | afa9d1292b0a152e36d338dde7b02f0b93bd37d9 | "2023-05-03T17:28:46Z" | python | "2023-05-03T20:16:41Z" | langchain/tools/base.py | verbose_ = self.verbose
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, verbose=verbose_
)
new_arg_supported = signature(self._arun).parameters.get("run_manager")
run_manager = await callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input)
observation = (
await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else await self._arun(*tool_args, **tool_kwargs)
)
except (Exception, KeyboardInterrupt) as e:
await run_manager.on_tool_error(e)
raise e
await run_manager.on_tool_end(
str(observation), color=color, name=self.name, **kwargs
)
return observation
def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str:
"""Make tool callable."""
return self.run(tool_input, callbacks=callbacks)
class Tool(BaseTool): |